2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/dpo/load_balance.h>
23 stats_main_t stats_main;
25 #include <vnet/ip/ip.h>
27 #include <vpp/api/vpe_msg_enum.h>
30 #define f64_print(a,b)
32 #define vl_typedefs /* define message structures */
33 #include <vpp/api/vpe_all_api_h.h>
36 #define vl_endianfun /* define message structures */
37 #include <vpp/api/vpe_all_api_h.h>
40 /* instantiate all the print functions we know about */
41 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
43 #include <vpp/api/vpe_all_api_h.h>
46 #define foreach_stats_msg \
47 _(WANT_STATS, want_stats) \
48 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
49 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
50 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
51 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
52 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
53 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
54 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
55 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
56 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
57 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
58 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
59 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
60 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
63 /* These constants ensure msg sizes <= 1024, aka ring allocation */
64 #define SIMPLE_COUNTER_BATCH_SIZE 126
65 #define COMBINED_COUNTER_BATCH_SIZE 63
66 #define IP4_FIB_COUNTER_BATCH_SIZE 48
67 #define IP6_FIB_COUNTER_BATCH_SIZE 30
70 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/*
 * Format a vl_api_vnet_interface_combined_counters_t message as
 * human-readable "<if-name>.<rx|tx>.{packets,bytes} <value>" lines.
 * NOTE(review): this extract is missing interior lines (braces, some
 * declarations, break statements); comments describe visible code only.
 */
74 format_vnet_interface_combined_counters (u8 * s, va_list * args)
76 stats_main_t *sm = &stats_main;
77 vl_api_vnet_interface_combined_counters_t *mp =
78 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
81 u32 count, sw_if_index;
/* Message fields arrive in network byte order. */
83 count = ntohl (mp->count);
84 sw_if_index = ntohl (mp->first_sw_if_index);
88 vp = (vlib_counter_t *) mp->data;
90 switch (mp->vnet_counter_type)
92 case VNET_INTERFACE_COUNTER_RX:
95 case VNET_INTERFACE_COUNTER_TX:
/* default: unknown counter type */
99 counter_name = "bogus";
102 for (i = 0; i < count; i++)
/* Counter data may be unaligned inside the message; copy before byte-swapping. */
104 packets = clib_mem_unaligned (&vp->packets, u64);
105 packets = clib_net_to_host_u64 (packets);
106 bytes = clib_mem_unaligned (&vp->bytes, u64);
107 bytes = clib_net_to_host_u64 (bytes);
109 s = format (s, "%U.%s.packets %lld\n",
110 format_vnet_sw_if_index_name,
111 sm->vnet_main, sw_if_index, counter_name, packets);
112 s = format (s, "%U.%s.bytes %lld\n",
113 format_vnet_sw_if_index_name,
114 sm->vnet_main, sw_if_index, counter_name, bytes);
/*
 * Format a vl_api_vnet_interface_simple_counters_t message as
 * "<if-name>.<counter-name> <value>" lines, one per interface.
 * NOTE(review): extract is missing interior lines (braces, breaks);
 * comments describe visible code only.
 */
121 format_vnet_interface_simple_counters (u8 * s, va_list * args)
123 stats_main_t *sm = &stats_main;
124 vl_api_vnet_interface_simple_counters_t *mp =
125 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
127 u32 count, sw_if_index;
128 count = ntohl (mp->count);
129 sw_if_index = ntohl (mp->first_sw_if_index);
131 vp = (u64 *) mp->data;
/* Map the wire counter-type code to a display name. */
134 switch (mp->vnet_counter_type)
136 case VNET_INTERFACE_COUNTER_DROP:
137 counter_name = "drop";
139 case VNET_INTERFACE_COUNTER_PUNT:
140 counter_name = "punt";
142 case VNET_INTERFACE_COUNTER_IP4:
143 counter_name = "ip4";
145 case VNET_INTERFACE_COUNTER_IP6:
146 counter_name = "ip6";
148 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
149 counter_name = "rx-no-buff";
151 case VNET_INTERFACE_COUNTER_RX_MISS:
152 counter_name = "rx-miss";
154 case VNET_INTERFACE_COUNTER_RX_ERROR:
155 counter_name = "rx-error (fifo-full)";
157 case VNET_INTERFACE_COUNTER_TX_ERROR:
158 counter_name = "tx-error (fifo-full)";
/* default: unknown counter type */
161 counter_name = "bogus";
164 for (i = 0; i < count; i++)
/* Unaligned read then network-to-host conversion. */
166 v = clib_mem_unaligned (vp, u64);
167 v = clib_net_to_host_u64 (v);
169 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
170 sm->vnet_main, sw_if_index, counter_name, v);
/*
 * Acquire the stats data-structure spinlock.
 * Re-entrant: if the calling thread already holds the lock, the early
 * thread_index check returns without spinning (recursion guard).
 * NOTE(review): extract is missing interior lines; the release_hint/tag
 * handling is not visible here.
 */
178 dslock (stats_main_t * sm, int release_hint, int tag)
181 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock not yet initialized -> nothing to take. */
183 if (PREDICT_FALSE (l == 0))
186 thread_index = vlib_get_thread_index ();
/* Already held by this thread: recursive acquisition, bail out early. */
187 if (l->lock && l->thread_index == thread_index)
/* Spin until the test-and-set wins. */
196 while (__sync_lock_test_and_set (&l->lock, 1))
199 l->thread_index = thread_index;
/* Public wrapper: take the stats data-structure lock on the global stats_main. */
204 stats_dslock_with_hint (int hint, int tag)
206 stats_main_t *sm = &stats_main;
207 dslock (sm, hint, tag);
/*
 * Release the stats data-structure lock. Asserts the caller is the
 * current owner. NOTE(review): the actual lock-clearing store is not
 * visible in this extract; only the barrier is shown.
 */
211 dsunlock (stats_main_t * sm)
214 data_structure_lock_t *l = sm->data_structure_lock;
216 if (PREDICT_FALSE (l == 0))
219 thread_index = vlib_get_thread_index ();
/* Must be unlocked by the same thread that locked it. */
220 ASSERT (l->lock && l->thread_index == thread_index);
/* Ensure prior writes are visible before the lock release. */
226 CLIB_MEMORY_BARRIER ();
/* Public wrapper for dsunlock on the global stats_main.
 * NOTE(review): the dsunlock() call itself is missing from this extract. */
232 stats_dsunlock (int hint, int tag)
234 stats_main_t *sm = &stats_main;
/*
 * Batch all per-interface simple (u64) counters into
 * VNET_INTERFACE_SIMPLE_COUNTERS API messages and post them to the main
 * thread's shmem input queue. Batch size is capped at
 * SIMPLE_COUNTER_BATCH_SIZE so each message fits the 1024-byte ring slot.
 * NOTE(review): extract is missing interior lines; comments describe
 * visible code only.
 */
239 do_simple_interface_counters (stats_main_t * sm)
241 vl_api_vnet_interface_simple_counters_t *mp = 0;
242 vnet_interface_main_t *im = sm->interface_main;
243 api_main_t *am = sm->api_main;
244 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
245 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
246 vlib_simple_counter_main_t *cm;
247 u32 items_this_message = 0;
252 * Prevent interface registration from expanding / moving the vectors...
253 * That tends never to happen, so we can hold this lock for a while.
255 vnet_interface_counter_lock (im);
/* One pass per counter type (drop, punt, ip4, ...). */
257 vec_foreach (cm, im->sw_if_counters)
259 n_counts = vlib_simple_counter_n_counters (cm);
260 for (i = 0; i < n_counts; i++)
264 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
267 mp = vl_msg_api_alloc_as_if_client
268 (sizeof (*mp) + items_this_message * sizeof (v));
269 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
/* Counter type is the offset of cm within the sw_if_counters vector. */
270 mp->vnet_counter_type = cm - im->sw_if_counters;
271 mp->first_sw_if_index = htonl (i);
273 vp = (u64 *) mp->data;
275 v = vlib_get_simple_counter (cm, i);
/* Store big-endian into the (possibly unaligned) message payload. */
276 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
279 if (mp->count == items_this_message)
281 mp->count = htonl (items_this_message);
282 /* Send to the main thread... */
283 vl_msg_api_send_shmem (q, (u8 *) & mp);
289 vnet_interface_counter_unlock (im);
/*
 * Batch all per-interface combined (packets+bytes) counters into
 * VNET_INTERFACE_COMBINED_COUNTERS API messages and post them to the
 * main thread's shmem input queue. Mirrors do_simple_interface_counters
 * with COMBINED_COUNTER_BATCH_SIZE as the per-message cap.
 * NOTE(review): extract is missing interior lines; comments describe
 * visible code only.
 */
293 do_combined_interface_counters (stats_main_t * sm)
295 vl_api_vnet_interface_combined_counters_t *mp = 0;
296 vnet_interface_main_t *im = sm->interface_main;
297 api_main_t *am = sm->api_main;
298 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
299 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
300 vlib_combined_counter_main_t *cm;
301 u32 items_this_message = 0;
302 vlib_counter_t v, *vp = 0;
/* Keep the interface vectors from moving while we walk them. */
305 vnet_interface_counter_lock (im);
307 vec_foreach (cm, im->combined_sw_if_counters)
309 n_counts = vlib_combined_counter_n_counters (cm);
310 for (i = 0; i < n_counts; i++)
314 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
317 mp = vl_msg_api_alloc_as_if_client
318 (sizeof (*mp) + items_this_message * sizeof (v));
319 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
320 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
321 mp->first_sw_if_index = htonl (i);
323 vp = (vlib_counter_t *) mp->data;
325 vlib_get_combined_counter (cm, i, &v);
/* Unaligned big-endian stores into the message payload. */
326 clib_mem_unaligned (&vp->packets, u64)
327 = clib_host_to_net_u64 (v.packets);
328 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
331 if (mp->count == items_this_message)
333 mp->count = htonl (items_this_message);
334 /* Send to the main thread... */
335 vl_msg_api_send_shmem (q, (u8 *) & mp);
341 vnet_interface_counter_unlock (im);
/*
 * Sleep for sec seconds + nsec nanoseconds via nanosleep(2), used to
 * back off when the main thread wants the data-structure lock or the
 * shmem queue is full. NOTE(review): the req initialization and the
 * EINTR-retry loop are not visible in this extract.
 */
345 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
347 struct timespec _req, *req = &_req;
348 struct timespec _rem, *rem = &_rem;
354 if (nanosleep (req, rem) == 0)
/* Any failure other than a handled interruption is warned about. */
359 clib_unix_warning ("nanosleep");
365 * @brief The context passed when collecting adjacency counters
/* Walk context used while collecting per-adjacency IPv4 neighbour counters. */
367 typedef struct ip4_nbr_stats_ctx_t_
370 * The SW IF index all these adjs belong to
375 * A vector of ip4 nbr counters
377 vl_api_ip4_nbr_counter_t *counters;
378 } ip4_nbr_stats_ctx_t;
/*
 * adj_nbr_walk callback: read the combined counter for adjacency ai and,
 * if it has seen traffic, append a wire-format counter record to the
 * walk context (arg is an ip4_nbr_stats_ctx_t *). Always continues the walk.
 * NOTE(review): the adj lookup (adj = adj_get(ai)?) is missing from this
 * extract — confirm against the full source.
 */
381 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
383 vl_api_ip4_nbr_counter_t *vl_counter;
384 vlib_counter_t adj_counter;
385 ip4_nbr_stats_ctx_t *ctx;
389 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Skip adjacencies with no traffic. */
391 if (0 != adj_counter.packets)
393 vec_add2 (ctx->counters, vl_counter, 1);
/* Counters go big-endian; the ip4 address is already network order. */
396 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
397 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
398 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
399 vl_counter->link_type = adj->ia_link;
401 return (ADJ_WALK_RC_CONTINUE);
404 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * Drain the collected IPv4 neighbour counters from the walk context into
 * VL_API_VNET_IP4_NBR_COUNTERS messages, IP4_FIB_COUNTER_BATCH_SIZE items
 * at a time, and post them to the main thread. If the queue fills up we
 * pause; the leftover counters stay in ctx for the next call.
 * NOTE(review): extract is missing interior lines (memcpy target, dsunlock,
 * pause handling); comments describe visible code only.
 */
407 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
409 api_main_t *am = sm->api_main;
410 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
411 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
412 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
416 * If the walk context has counters, which may be left over from the last
417 * suspend, then we continue from there.
419 while (0 != vec_len (ctx->counters))
421 u32 n_items = MIN (vec_len (ctx->counters),
422 IP4_FIB_COUNTER_BATCH_SIZE);
425 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
427 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
430 (vl_api_ip4_nbr_counter_t)));
431 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
/* NOTE(review): ntohl used for host->net here; same op as htonl on the
 * supported platforms, but htonl would be the conventional spelling. */
432 mp->count = ntohl (n_items);
433 mp->sw_if_index = ntohl (ctx->sw_if_index);
438 * copy the counters from the back of the context, then we can easily
439 * 'erase' them by resetting the vector length.
440 * The order we push the stats to the caller is not important.
443 &ctx->counters[vec_len (ctx->counters) - n_items],
444 n_items * sizeof (*ctx->counters));
/* Shrink the vector in place; remaining items are shipped next iteration. */
446 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Send under the queue lock, noting whether the queue is full so the
 * caller can pause before continuing. */
451 unix_shared_memory_queue_lock (q);
452 pause = unix_shared_memory_queue_is_full (q);
454 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
455 unix_shared_memory_queue_unlock (q);
459 ip46_fib_stats_delay (sm, 0 /* sec */ ,
460 STATS_RELEASE_DELAY_NS);
/*
 * For each software interface, walk its IPv4 neighbour adjacencies
 * (under the data-structure lock) collecting non-zero counters into the
 * context, then ship them to registered clients via ip4_nbr_ship.
 * NOTE(review): extract is missing interior lines (pool_foreach body
 * braces, walk callback argument, dsunlock); comments describe visible
 * code only.
 */
465 do_ip4_nbrs (stats_main_t * sm)
467 vnet_main_t *vnm = vnet_get_main ();
468 vnet_interface_main_t *im = &vnm->interface_main;
469 vnet_sw_interface_t *si;
471 ip4_nbr_stats_ctx_t ctx = {
477 pool_foreach (si, im->sw_interfaces,
480 * update the interface we are now concerned with
482 ctx.sw_if_index = si->sw_if_index;
485 * we are about to walk another interface, so we shouldn't have any pending
488 ASSERT(ctx.counters == NULL);
491 * visit each neighbour adjacency on the interface and collect
493 * Because we hold the lock the walk is synchronous, so safe to routing
494 * updates. It's limited in work by the number of adjacenies on an
495 * interface, which is typically not huge.
497 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
498 adj_nbr_walk (si->sw_if_index,
505 * if this interface has some adjacencies with counters then ship them,
506 * else continue to the next interface.
508 if (NULL != ctx.counters)
510 ip4_nbr_ship(sm, &ctx);
517 * @brief The context passed when collecting adjacency counters
/* Walk context used while collecting per-adjacency IPv6 neighbour counters. */
519 typedef struct ip6_nbr_stats_ctx_t_
522 * The SW IF index all these adjs belong to
527 * A vector of ip6 nbr counters
529 vl_api_ip6_nbr_counter_t *counters;
530 } ip6_nbr_stats_ctx_t;
/*
 * adj_nbr_walk callback (IPv6 variant of ip4_nbr_stats_cb): record the
 * combined counter for adjacency ai into the walk context when non-zero.
 * NOTE(review): the adj lookup is missing from this extract — confirm
 * against the full source.
 */
533 ip6_nbr_stats_cb (adj_index_t ai,
536 vl_api_ip6_nbr_counter_t *vl_counter;
537 vlib_counter_t adj_counter;
538 ip6_nbr_stats_ctx_t *ctx;
542 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* Skip adjacencies with no traffic. */
544 if (0 != adj_counter.packets)
546 vec_add2(ctx->counters, vl_counter, 1);
/* Counters go big-endian; the ip6 address halves are already network order. */
549 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
550 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
551 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
552 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
553 vl_counter->link_type = adj->ia_link;
555 return (ADJ_WALK_RC_CONTINUE);
558 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * IPv6 twin of ip4_nbr_ship: drain collected neighbour counters into
 * VL_API_VNET_IP6_NBR_COUNTERS messages, IP6_FIB_COUNTER_BATCH_SIZE items
 * per message, pausing when the main thread's queue is full.
 * NOTE(review): extract is missing interior lines (memcpy target, dsunlock,
 * pause handling); comments describe visible code only.
 */
561 ip6_nbr_ship (stats_main_t * sm,
562 ip6_nbr_stats_ctx_t *ctx)
564 api_main_t *am = sm->api_main;
565 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
566 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
567 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
571 * If the walk context has counters, which may be left over from the last
572 * suspend, then we continue from there.
574 while (0 != vec_len(ctx->counters))
576 u32 n_items = MIN (vec_len (ctx->counters),
577 IP6_FIB_COUNTER_BATCH_SIZE);
580 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
582 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
585 (vl_api_ip6_nbr_counter_t)));
586 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
/* NOTE(review): ntohl used for host->net; conventionally htonl. */
587 mp->count = ntohl (n_items);
588 mp->sw_if_index = ntohl (ctx->sw_if_index);
593 * copy the counters from the back of the context, then we can easily
594 * 'erase' them by resetting the vector length.
595 * The order we push the stats to the caller is not important.
598 &ctx->counters[vec_len (ctx->counters) - n_items],
599 n_items * sizeof (*ctx->counters));
/* Shrink in place; remaining items ship on the next iteration. */
601 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Send under the queue lock, recording queue-full so we can pause. */
606 unix_shared_memory_queue_lock (q);
607 pause = unix_shared_memory_queue_is_full (q);
609 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
610 unix_shared_memory_queue_unlock (q);
614 ip46_fib_stats_delay (sm, 0 /* sec */ ,
615 STATS_RELEASE_DELAY_NS);
/*
 * IPv6 twin of do_ip4_nbrs: walk every interface's IPv6 neighbour
 * adjacencies under the data-structure lock, collect non-zero counters,
 * then ship them via ip6_nbr_ship.
 * NOTE(review): extract is missing interior lines (pool_foreach braces,
 * walk callback argument, dsunlock); comments describe visible code only.
 */
620 do_ip6_nbrs (stats_main_t * sm)
622 vnet_main_t *vnm = vnet_get_main ();
623 vnet_interface_main_t *im = &vnm->interface_main;
624 vnet_sw_interface_t *si;
626 ip6_nbr_stats_ctx_t ctx = {
632 pool_foreach (si, im->sw_interfaces,
635 * update the interface we are now concerned with
637 ctx.sw_if_index = si->sw_if_index;
640 * we are about to walk another interface, so we shouldn't have any pending
643 ASSERT(ctx.counters == NULL);
646 * visit each neighbour adjacency on the interface and collect
648 * Because we hold the lock the walk is synchronous, so safe to routing
649 * updates. It's limited in work by the number of adjacenies on an
650 * interface, which is typically not huge.
652 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
653 adj_nbr_walk (si->sw_if_index,
660 * if this interface has some adjacencies with counters then ship them,
661 * else continue to the next interface.
663 if (NULL != ctx.counters)
665 ip6_nbr_ship(sm, &ctx);
/*
 * Walk every IPv4 FIB, gather per-route combined counters (from the
 * load-balance to-counters), and stream them to the main thread as
 * VL_API_VNET_IP4_FIB_COUNTERS messages, IP4_FIB_COUNTER_BATCH_SIZE
 * routes per message. Cooperative: whenever the main thread sets
 * release_hint we drop the lock, remember where we were
 * (start_at_fib_index) and back off before resuming.
 * NOTE(review): this extract is missing many interior lines (loop braces,
 * dsunlock calls, counter checks); comments describe visible code only.
 */
672 do_ip4_fibs (stats_main_t * sm)
674 ip4_main_t *im4 = &ip4_main;
675 api_main_t *am = sm->api_main;
676 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
677 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
681 do_ip46_fibs_t *do_fibs;
682 vl_api_vnet_ip4_fib_counters_t *mp = 0;
683 u32 items_this_message;
684 vl_api_ip4_fib_counter_t *ctrp = 0;
685 u32 start_at_fib_index = 0;
688 do_fibs = &sm->do_ip46_fibs;
/* Snapshot the fib pool into a plain vector so we can iterate/restart. */
691 vec_reset_length (do_fibs->fibs);
693 pool_foreach (fib, im4->fibs,
694 ({vec_add1(do_fibs->fibs,fib);}));
698 for (j = 0; j < vec_len (do_fibs->fibs); j++)
700 fib = do_fibs->fibs[j];
701 /* We may have bailed out due to control-plane activity */
702 while ((fib - im4->fibs) < start_at_fib_index)
705 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
709 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
710 mp = vl_msg_api_alloc_as_if_client
712 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
713 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
715 mp->vrf_id = ntohl (fib->ft_table_id);
716 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
720 /* happens if the last FIB was empty... */
721 ASSERT (mp->count == 0);
722 mp->vrf_id = ntohl (fib->ft_table_id);
725 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
727 vec_reset_length (do_fibs->ip4routes);
728 vec_reset_length (do_fibs->results);
/* Collect every route, bucketed by prefix length, into ip4routes. */
730 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
732 uword *hash = v4_fib->fib_entry_by_dst_address[i];
736 vec_reset_length (do_fibs->pvec);
738 x.address_length = i;
740 hash_foreach_pair (p, hash, (
742 vec_add1 (do_fibs->pvec, p);}
744 for (k = 0; k < vec_len (do_fibs->pvec); k++)
746 p = do_fibs->pvec[k];
747 x.address.data_u32 = p->key;
748 x.index = p->value[0];
750 vec_add1 (do_fibs->ip4routes, x);
/* Main thread wants the lock: remember position, back off, retry. */
751 if (sm->data_structure_lock->release_hint)
753 start_at_fib_index = fib - im4->fibs;
755 ip46_fib_stats_delay (sm, 0 /* sec */ ,
756 STATS_RELEASE_DELAY_NS);
758 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
/* Now resolve each collected route's forwarding DPO and read counters. */
764 vec_foreach (r, do_fibs->ip4routes)
767 const dpo_id_t *dpo_id;
770 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
771 index = (u32) dpo_id->dpoi_index;
773 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
777 * seen at least one packet, send it.
782 /* already in net byte order */
783 ctrp->address = r->address.as_u32;
784 ctrp->address_length = r->address_length;
785 ctrp->packets = clib_host_to_net_u64 (c.packets);
786 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
790 if (mp->count == items_this_message)
792 mp->count = htonl (items_this_message);
794 * If the main thread's input queue is stuffed,
795 * drop the data structure lock (which the main thread
796 * may want), and take a pause.
798 unix_shared_memory_queue_lock (q);
799 if (unix_shared_memory_queue_is_full (q))
802 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
803 unix_shared_memory_queue_unlock (q);
805 ip46_fib_stats_delay (sm, 0 /* sec */ ,
806 STATS_RELEASE_DELAY_NS);
809 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
810 unix_shared_memory_queue_unlock (q);
/* Start a fresh message for the remainder of this FIB. */
812 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
813 mp = vl_msg_api_alloc_as_if_client
815 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
816 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
818 mp->vrf_id = ntohl (fib->ft_table_id);
819 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
821 } /* for each (mp or single) adj */
822 if (sm->data_structure_lock->release_hint)
824 start_at_fib_index = fib - im4->fibs;
826 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
828 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
831 } /* vec_foreach (routes) */
835 /* Flush any data from this fib */
838 mp->count = htonl (mp->count);
839 vl_msg_api_send_shmem (q, (u8 *) & mp);
844 /* If e.g. the last FIB had no reportable routes, free the buffer */
846 vl_msg_api_free (mp);
852 ip6_route_t **routep;
854 } add_routes_in_fib_arg_t;
/*
 * Bihash walk callback for the IPv6 forwarding table: append to the
 * caller's route vector every key/value pair belonging to the fib index
 * in the arg. If the main thread signals release_hint, longjmp back to
 * do_ip6_fibs so the lock can be dropped and the walk restarted.
 */
857 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
859 add_routes_in_fib_arg_t *ap = arg;
860 stats_main_t *sm = ap->sm;
/* Non-local bail-out: the setjmp lives in do_ip6_fibs. */
862 if (sm->data_structure_lock->release_hint)
863 clib_longjmp (&sm->jmp_buf, 1);
/* Key layout: key[2] upper 32 bits = fib index, low byte = prefix length. */
865 if (kvp->key[2] >> 32 == ap->fib_index)
869 addr = (ip6_address_t *) kvp;
870 vec_add2 (*ap->routep, r, 1);
871 r->address = addr[0];
872 r->address_length = kvp->key[2] & 0xFF;
873 r->index = kvp->value;
/*
 * IPv6 twin of do_ip4_fibs: walk every IPv6 FIB via the forwarding
 * bihash, gather per-route combined counters and stream them as
 * VL_API_VNET_IP6_FIB_COUNTERS messages, IP6_FIB_COUNTER_BATCH_SIZE
 * routes per message. Uses setjmp/longjmp (add_routes_in_fib) to abort
 * the bihash walk when the main thread wants the lock.
 * NOTE(review): this extract is missing many interior lines (loop braces,
 * dsunlock calls, counter checks); comments describe visible code only.
 */
878 do_ip6_fibs (stats_main_t * sm)
880 ip6_main_t *im6 = &ip6_main;
881 api_main_t *am = sm->api_main;
882 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
883 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
886 do_ip46_fibs_t *do_fibs;
887 vl_api_vnet_ip6_fib_counters_t *mp = 0;
888 u32 items_this_message;
889 vl_api_ip6_fib_counter_t *ctrp = 0;
890 u32 start_at_fib_index = 0;
891 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
892 add_routes_in_fib_arg_t _a, *a = &_a;
895 do_fibs = &sm->do_ip46_fibs;
/* Snapshot the fib pool into a plain vector so we can iterate/restart. */
897 vec_reset_length (do_fibs->fibs);
899 pool_foreach (fib, im6->fibs,
900 ({vec_add1(do_fibs->fibs,fib);}));
904 for (i = 0; i < vec_len (do_fibs->fibs); i++)
906 fib = do_fibs->fibs[i];
907 /* We may have bailed out due to control-plane activity */
908 while ((fib - im6->fibs) < start_at_fib_index)
913 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
914 mp = vl_msg_api_alloc_as_if_client
916 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
917 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
919 mp->vrf_id = ntohl (fib->ft_table_id);
920 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
923 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
925 vec_reset_length (do_fibs->ip6routes);
926 vec_reset_length (do_fibs->results);
928 a->fib_index = fib - im6->fibs;
929 a->routep = &do_fibs->ip6routes;
/* setjmp returns non-zero when add_routes_in_fib longjmps out because
 * the main thread asked for the lock; then back off and retry. */
932 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
934 start_at_fib_index = fib - im6->fibs;
935 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
940 ip46_fib_stats_delay (sm, 0 /* sec */ ,
941 STATS_RELEASE_DELAY_NS);
943 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Read the combined counter for each collected route. */
947 vec_foreach (r, do_fibs->ip6routes)
951 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
955 * seen at least one packet, send it.
959 /* already in net byte order */
960 ctrp->address[0] = r->address.as_u64[0];
961 ctrp->address[1] = r->address.as_u64[1];
962 ctrp->address_length = (u8) r->address_length;
963 ctrp->packets = clib_host_to_net_u64 (c.packets);
964 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
968 if (mp->count == items_this_message)
970 mp->count = htonl (items_this_message);
972 * If the main thread's input queue is stuffed,
973 * drop the data structure lock (which the main thread
974 * may want), and take a pause.
976 unix_shared_memory_queue_lock (q);
977 if (unix_shared_memory_queue_is_full (q))
980 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
981 unix_shared_memory_queue_unlock (q);
983 ip46_fib_stats_delay (sm, 0 /* sec */ ,
984 STATS_RELEASE_DELAY_NS);
987 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
988 unix_shared_memory_queue_unlock (q);
/* Start a fresh message for the remainder of this FIB. */
990 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
991 mp = vl_msg_api_alloc_as_if_client
993 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
994 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
996 mp->vrf_id = ntohl (fib->ft_table_id);
997 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1001 if (sm->data_structure_lock->release_hint)
1003 start_at_fib_index = fib - im6->fibs;
1005 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1007 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1010 } /* vec_foreach (routes) */
1014 /* Flush any data from this fib */
1017 mp->count = htonl (mp->count);
1018 vl_msg_api_send_shmem (q, (u8 *) & mp);
1023 /* If e.g. the last FIB had no reportable routes, free the buffer */
1025 vl_msg_api_free (mp);
/*
 * Stats pthread entry point: blocks all signals, names the thread
 * "<prefix>_stats", switches to its own heap, then loops forever on a
 * 10-second cadence scraping counters when a poller is enabled.
 * NOTE(review): extract is missing interior lines (sigfillset, the
 * while-loop brace, the do_ip*_fibs/nbrs calls); comments describe
 * visible code only.
 */
1029 stats_thread_fn (void *arg)
1031 stats_main_t *sm = &stats_main;
1032 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
1033 vlib_thread_main_t *tm = vlib_get_thread_main ();
1035 /* stats thread wants no signals. */
1039 pthread_sigmask (SIG_SETMASK, &s, 0);
1042 if (vec_len (tm->thread_prefix))
1043 vlib_set_thread_name ((char *)
1044 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
/* Use this worker's private heap for all allocations on this thread. */
1046 clib_mem_set_heap (w->thread_mheap);
1050 /* 10 second poll interval */
1051 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
/* Skip scraping entirely when no client has registered. */
1053 if (!(sm->enable_poller))
1055 do_simple_interface_counters (sm);
1056 do_combined_interface_counters (sm);
/*
 * Main-thread handler: fan a simple-counters message out to every client
 * registered for INTERFACE_SIMPLE_COUNTERS. The original message is
 * reused for the last recipient (send-to-previous pattern: copy for all
 * but the final queue); freed if no client could take it.
 * NOTE(review): extract is missing interior lines (q_prev/mp updates,
 * loop braces); comments describe visible code only.
 */
1065 vl_api_vnet_interface_simple_counters_t_handler
1066 (vl_api_vnet_interface_simple_counters_t * mp)
1068 vpe_client_stats_registration_t *reg;
1069 stats_main_t *sm = &stats_main;
1070 unix_shared_memory_queue_t *q, *q_prev = NULL;
1071 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
/* Total wire size: header plus count u64 payload entries. */
1075 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
/* Snapshot registrations into a vector before iterating. */
1078 vec_reset_length(sm->regs);
1079 pool_foreach(reg, sm->stats_registrations,
1081 vec_add1(sm->regs,reg);
1084 for (i = 0; i < vec_len (sm->regs); i++)
1087 if (reg->stats_registrations & INTERFACE_SIMPLE_COUNTERS)
1089 q = vl_api_client_index_to_input_queue (reg->client.client_index);
/* Send the previous copy only if that queue has room. */
1092 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1094 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
1095 clib_memcpy (mp_copy, mp, mp_size);
1096 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1104 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
/* Last registered queue gets the original message. */
1107 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1109 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
/* Nobody took ownership: free it. */
1113 vl_msg_api_free (mp);
/*
 * Main-thread handler: fan a combined-counters message out to every
 * client registered for INTERFACE_COMBINED_COUNTERS, using the same
 * copy-for-all-but-last pattern as the simple-counters handler.
 * NOTE(review): extract is missing interior lines; comments describe
 * visible code only.
 */
1118 vl_api_vnet_interface_combined_counters_t_handler
1119 (vl_api_vnet_interface_combined_counters_t * mp)
1121 vpe_client_stats_registration_t *reg;
1122 stats_main_t *sm = &stats_main;
1123 unix_shared_memory_queue_t *q, *q_prev = NULL;
1124 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
1127 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
1130 pool_foreach(reg, sm->stats_registrations,
1132 if (reg->stats_registrations & INTERFACE_COMBINED_COUNTERS)
1134 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1137 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1139 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1140 clib_memcpy(mp_copy, mp, mp_size);
1141 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
1151 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
/* Last registered queue gets the original; otherwise free it. */
1154 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1156 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1160 vl_msg_api_free (mp);
/*
 * Fan an ip4-fib-counters message out to clients registered for
 * IP4_FIB_COUNTERS (copy-for-all-but-last pattern).
 * NOTE(review): extract is missing interior lines; comments describe
 * visible code only.
 */
1165 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
1167 vpe_client_stats_registration_t *reg;
1168 stats_main_t *sm = &stats_main;
1169 unix_shared_memory_queue_t *q, *q_prev = NULL;
1170 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
1173 mp_size = sizeof (*mp_copy) +
1174 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
1177 pool_foreach(reg, sm->stats_registrations,
1179 if (reg->stats_registrations & IP4_FIB_COUNTERS)
1181 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1184 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1186 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1187 clib_memcpy(mp_copy, mp, mp_size);
1188 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last registered queue gets the original; otherwise free it. */
1196 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1198 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1202 vl_msg_api_free (mp);
/*
 * Fan an ip4-nbr-counters message out to clients registered for
 * IP4_NBR_COUNTERS (copy-for-all-but-last pattern).
 * NOTE(review): extract is missing interior lines; comments describe
 * visible code only.
 */
1207 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
1209 vpe_client_stats_registration_t *reg;
1210 stats_main_t *sm = &stats_main;
1211 unix_shared_memory_queue_t *q, *q_prev = NULL;
1212 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
1215 mp_size = sizeof (*mp_copy) +
1216 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
1219 pool_foreach(reg, sm->stats_registrations,
1221 if (reg->stats_registrations & IP4_NBR_COUNTERS)
1223 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1226 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1228 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1229 clib_memcpy(mp_copy, mp, mp_size);
1230 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last registered queue gets the original; otherwise free it. */
1238 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1240 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1244 vl_msg_api_free (mp);
/*
 * Fan an ip6-fib-counters message out to clients registered for
 * IP6_FIB_COUNTERS (copy-for-all-but-last pattern).
 * NOTE(review): extract is missing interior lines; comments describe
 * visible code only.
 */
1249 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
1251 vpe_client_stats_registration_t *reg;
1252 stats_main_t *sm = &stats_main;
1253 unix_shared_memory_queue_t *q, *q_prev = NULL;
1254 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
1257 mp_size = sizeof (*mp_copy) +
1258 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
1261 pool_foreach(reg, sm->stats_registrations,
1263 if (reg->stats_registrations & IP6_FIB_COUNTERS)
1265 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1268 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1270 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1271 clib_memcpy(mp_copy, mp, mp_size);
1272 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last registered queue gets the original; otherwise free it. */
1280 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1282 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1286 vl_msg_api_free (mp);
/*
 * Fan an ip6-nbr-counters message out to clients registered for
 * IP6_NBR_COUNTERS (copy-for-all-but-last pattern).
 * NOTE(review): extract is missing interior lines; comments describe
 * visible code only.
 */
1291 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
1293 vpe_client_stats_registration_t *reg;
1294 stats_main_t *sm = &stats_main;
1295 unix_shared_memory_queue_t *q, *q_prev = NULL;
1296 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
1299 mp_size = sizeof (*mp_copy) +
1300 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
1303 pool_foreach(reg, sm->stats_registrations,
1305 if (reg->stats_registrations & IP6_NBR_COUNTERS)
1307 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1310 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1312 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1313 clib_memcpy(mp_copy, mp, mp_size);
1314 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last registered queue gets the original; otherwise free it. */
1322 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1324 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1328 vl_msg_api_free (mp);
/*
 * Legacy all-or-nothing registration: enable_disable != 0 subscribes the
 * client to every counter category; 0 removes its registration entirely.
 * Replies with WANT_STATS_REPLY. Duplicate enables/disables just warn.
 * NOTE(review): extract is missing interior lines (braces, retval
 * handling, early-return on missing q); comments describe visible
 * code only.
 */
1333 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
1335 stats_main_t *sm = &stats_main;
1336 vpe_client_stats_registration_t *rp;
1337 vl_api_want_stats_reply_t *rmp;
1340 unix_shared_memory_queue_t *q;
/* Look up any existing registration for this client. */
1342 p = hash_get (sm->stats_registration_hash, mp->client_index);
1345 if (mp->enable_disable)
1347 clib_warning ("pid %d: already enabled...", mp->pid);
/* Disable path: drop the registration from pool and hash. */
1353 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1354 pool_put (sm->stats_registrations, rp);
1355 hash_unset (sm->stats_registration_hash, mp->client_index);
1359 if (mp->enable_disable == 0)
1361 clib_warning ("pid %d: already disabled...", mp->pid);
/* Enable path: new registration subscribed to all categories. */
1365 pool_get (sm->stats_registrations, rp);
1366 rp->client.client_index = mp->client_index;
1367 rp->client.client_pid = mp->pid;
1368 rp->stats_registrations |= INTERFACE_SIMPLE_COUNTERS;
1369 rp->stats_registrations |= INTERFACE_COMBINED_COUNTERS;
1370 rp->stats_registrations |= IP4_FIB_COUNTERS;
1371 rp->stats_registrations |= IP4_NBR_COUNTERS;
1372 rp->stats_registrations |= IP6_FIB_COUNTERS;
1373 rp->stats_registrations |= IP6_NBR_COUNTERS;
1375 hash_set (sm->stats_registration_hash, rp->client.client_index,
1376 rp - sm->stats_registrations);
/* Poller runs iff at least one client is registered. */
1379 if (pool_elts (sm->stats_registrations))
1380 sm->enable_poller = 1;
1382 sm->enable_poller = 0;
1384 q = vl_api_client_index_to_input_queue (mp->client_index);
1389 rmp = vl_msg_api_alloc (sizeof (*rmp));
1390 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
1391 rmp->context = mp->context;
1392 rmp->retval = retval;
1394 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * Per-category registration handler for INTERFACE_SIMPLE_COUNTERS:
 * enable_disable == 0 clears the flag (removing the registration entirely
 * when no flags remain); non-zero sets the flag, creating a registration
 * if the client has none. Replies with WANT_INTERFACE_SIMPLE_STATS_REPLY.
 *
 * FIX: the "already disabled for this counter" test used
 *   !rp->stats_registrations & INTERFACE_SIMPLE_COUNTERS
 * but `!` binds tighter than `&`, so it computed
 * (!rp->stats_registrations) & FLAG — i.e. almost always false — instead
 * of testing whether the flag is clear. Parenthesized to match the
 * correct form used by the combined-stats handler below.
 *
 * NOTE(review): this extract is missing interior lines (braces, retval
 * assignments, gotos); only the visible defect is changed.
 */
1398 vl_api_want_interface_simple_stats_t_handler
1399 (vl_api_want_interface_simple_stats_t * mp)
1401 stats_main_t *sm = &stats_main;
1402 vpe_client_stats_registration_t *rp;
1403 vl_api_want_interface_simple_stats_reply_t *rmp;
1406 unix_shared_memory_queue_t *q;
1408 p = hash_get (sm->stats_registration_hash, mp->client_index);
1411 if (mp->enable_disable == 0)
1413 if (!p) // No client to disable
1415 clib_warning ("pid %d: already disabled for stats...", mp->pid);
1420 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1421 if (!(rp->stats_registrations & INTERFACE_SIMPLE_COUNTERS)) // Client but doesn't want this.
1424 ("pid %d: already disabled for interface simple stats...",
1431 rp->stats_registrations &= ~(INTERFACE_SIMPLE_COUNTERS); // Clear flag
1432 if (rp->stats_registrations == 0) // Client isn't listening to anything else
1434 pool_put (sm->stats_registrations, rp);
1435 hash_unset (sm->stats_registration_hash, mp->client_index);
1441 /* Get client from pool */
1443 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1445 if (!p || !rp) // Doesn't exist, make a new entry
1447 pool_get (sm->stats_registrations, rp);
1448 rp->client.client_index = mp->client_index;
1449 rp->client.client_pid = mp->pid;
1451 rp->stats_registrations |= INTERFACE_SIMPLE_COUNTERS;
1452 hash_set (sm->stats_registration_hash, rp->client.client_index,
1453 rp - sm->stats_registrations);
1456 if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now.
1457 sm->enable_poller = 1;
1459 sm->enable_poller = 0;
1461 q = vl_api_client_index_to_input_queue (mp->client_index);
1466 rmp = vl_msg_api_alloc (sizeof (*rmp));
1467 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
1468 rmp->context = mp->context;
1469 rmp->retval = retval;
1471 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * Per-category registration handler for INTERFACE_COMBINED_COUNTERS:
 * enable_disable == 0 clears the flag (removing the registration entirely
 * when no flags remain); non-zero sets it, creating a registration if the
 * client has none. Replies with WANT_INTERFACE_COMBINED_STATS_REPLY.
 * NOTE(review): extract is missing interior lines (braces, retval
 * assignments, gotos); comments describe visible code only.
 */
1475 vl_api_want_interface_combined_stats_t_handler
1476 (vl_api_want_interface_combined_stats_t * mp)
1478 stats_main_t *sm = &stats_main;
1479 vpe_client_stats_registration_t *rp;
1480 vl_api_want_interface_combined_stats_reply_t *rmp;
1483 unix_shared_memory_queue_t *q;
1485 p = hash_get (sm->stats_registration_hash, mp->client_index);
1488 if (mp->enable_disable == 0)
1490 if (!p) // No client to disable
1492 clib_warning ("pid %d: already disabled for stats...", mp->pid);
1497 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
/* Correctly parenthesized flag test (cf. the simple-stats handler). */
1498 if (!(rp->stats_registrations & INTERFACE_COMBINED_COUNTERS)) // Client but doesn't want this.
1501 ("pid %d: already disabled for interface COMBINED stats...",
1508 rp->stats_registrations &= ~(INTERFACE_COMBINED_COUNTERS); // Clear flag
1509 if (rp->stats_registrations == 0) // Client isn't listening to anything else
1511 pool_put (sm->stats_registrations, rp);
1512 hash_unset (sm->stats_registration_hash, mp->client_index);
1518 /* Get client from pool */
1520 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1522 if (!p || !rp) // Doesn't exist, make a new entry
1524 pool_get (sm->stats_registrations, rp);
1525 rp->client.client_index = mp->client_index;
1526 rp->client.client_pid = mp->pid;
1528 rp->stats_registrations |= INTERFACE_COMBINED_COUNTERS;
1529 hash_set (sm->stats_registration_hash, rp->client.client_index,
1530 rp - sm->stats_registrations);
1533 if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now.
1534 sm->enable_poller = 1;
1536 sm->enable_poller = 0;
1538 q = vl_api_client_index_to_input_queue (mp->client_index);
1543 rmp = vl_msg_api_alloc (sizeof (*rmp));
1544 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
1545 rmp->context = mp->context;
1546 rmp->retval = retval;
1548 vl_msg_api_send_shmem (q, (u8 *) & rmp);
1552 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
1554 stats_main_t *sm = &stats_main;
1555 vpe_client_stats_registration_t *rp;
1556 vl_api_want_ip4_fib_stats_reply_t *rmp;
1559 unix_shared_memory_queue_t *q;
1561 p = hash_get (sm->stats_registration_hash, mp->client_index);
1565 $$$ FIXME: need std return codes. Still undecided if enabling already
1566 enabled (and similar for disabled) is really a -'ve error condition or
1569 if (mp->enable_disable == 0)
1571 if (!p) // No client to disable
1573 clib_warning ("pid %d: already disabled for stats...", mp->pid);
1578 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1579 if (!(rp->stats_registrations & IP4_FIB_COUNTERS)) // Client but doesn't want this.
1581 clib_warning ("pid %d: already disabled for interface ip4 fib...",
1588 rp->stats_registrations &= ~(IP4_FIB_COUNTERS); // Clear flag
1589 if (rp->stats_registrations == 0) // Client isn't listening to anything else
1591 pool_put (sm->stats_registrations, rp);
1592 hash_unset (sm->stats_registration_hash, mp->client_index);
1598 /* Get client from pool */
1600 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1602 if (!p || !rp) // Doesn't exist, make a new entry
1604 pool_get (sm->stats_registrations, rp);
1605 rp->client.client_index = mp->client_index;
1606 rp->client.client_pid = mp->pid;
1608 rp->stats_registrations |= IP4_FIB_COUNTERS;
1609 hash_set (sm->stats_registration_hash, rp->client.client_index,
1610 rp - sm->stats_registrations);
1613 if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now.
1614 sm->enable_poller = 1;
1616 sm->enable_poller = 0;
1618 q = vl_api_client_index_to_input_queue (mp->client_index);
1623 rmp = vl_msg_api_alloc (sizeof (*rmp));
1624 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
1625 rmp->context = mp->context;
1626 rmp->retval = retval;
1628 vl_msg_api_send_shmem (q, (u8 *) & rmp);
1632 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
1634 stats_main_t *sm = &stats_main;
1635 vpe_client_stats_registration_t *rp;
1636 vl_api_want_ip6_fib_stats_reply_t *rmp;
1639 unix_shared_memory_queue_t *q;
1641 p = hash_get (sm->stats_registration_hash, mp->client_index);
1645 $$$ FIXME: need std return codes. Still undecided if enabling already
1646 enabled (and similar for disabled) is really a -'ve error condition or
1649 if (mp->enable_disable == 0)
1651 if (!p) // No client to disable
1653 clib_warning ("pid %d: already disabled for stats...", mp->pid);
1658 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1659 if (!(rp->stats_registrations & IP6_FIB_COUNTERS)) // Client but doesn't want this.
1661 clib_warning ("pid %d: already disabled for interface ip6 fib...",
1668 rp->stats_registrations &= ~(IP6_FIB_COUNTERS); // Clear flag
1669 if (rp->stats_registrations == 0) // Client isn't listening to anything else
1671 pool_put (sm->stats_registrations, rp);
1672 hash_unset (sm->stats_registration_hash, mp->client_index);
1678 /* Get client from pool */
1680 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1682 if (!p || !rp) // Doesn't exist, make a new entry
1684 pool_get (sm->stats_registrations, rp);
1685 rp->client.client_index = mp->client_index;
1686 rp->client.client_pid = mp->pid;
1688 rp->stats_registrations |= IP6_FIB_COUNTERS;
1689 hash_set (sm->stats_registration_hash, rp->client.client_index,
1690 rp - sm->stats_registrations);
1693 if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now.
1694 sm->enable_poller = 1;
1696 sm->enable_poller = 0;
1698 q = vl_api_client_index_to_input_queue (mp->client_index);
1703 rmp = vl_msg_api_alloc (sizeof (*rmp));
1704 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
1705 rmp->context = mp->context;
1706 rmp->retval = retval;
1708 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/* FIXME - NBR stats broken - this will be fixed in subsequent patch */

/* Placeholder: neighbour (adjacency) stats registration is intentionally a
 * no-op until the NBR counter path is repaired — see FIXME above. */
static void
vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
{
}

/* Placeholder, same reason as the IPv4 variant above. */
static void
vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
{
}
/* Reply to a one-shot summary-stats request: sum every combined (packets +
 * bytes) interface counter across all interfaces, split by RX/TX direction,
 * and send the totals back on the client's shared-memory queue. */
static void
vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
{
  stats_main_t *sm = &stats_main;
  vnet_interface_main_t *im = sm->interface_main;
  vl_api_vnet_get_summary_stats_reply_t *rmp;
  vlib_combined_counter_main_t *cm;
  /* NOTE(review): the declarations of i, which and v were on lines lost in
   * this chunk; reconstructed from their uses below — confirm against the
   * full file. */
  u32 which;
  int i;
  vlib_counter_t v;
  u64 total_pkts[VLIB_N_RX_TX];	/* indexed by VLIB_RX / VLIB_TX */
  u64 total_bytes[VLIB_N_RX_TX];

  unix_shared_memory_queue_t *q =
    vl_api_client_index_to_input_queue (mp->client_index);

  if (!q)
    return;			/* client queue gone; nothing to reply to */

  rmp = vl_msg_api_alloc (sizeof (*rmp));
  rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
  rmp->context = mp->context;
  rmp->retval = 0;

  memset (total_pkts, 0, sizeof (total_pkts));
  memset (total_bytes, 0, sizeof (total_bytes));

  /* Hold the interface-counter lock while walking the counter vectors so the
   * stats thread cannot resize/update them mid-walk. */
  vnet_interface_counter_lock (im);

  vec_foreach (cm, im->combined_sw_if_counters)
  {
    /* Counter-main index doubles as the direction (RX or TX). */
    which = cm - im->combined_sw_if_counters;

    for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
      {
	vlib_get_combined_counter (cm, i, &v);
	total_pkts[which] += v.packets;
	total_bytes[which] += v.bytes;
      }
  }
  vnet_interface_counter_unlock (im);

  /* Wire format is big-endian. */
  rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
  rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
  rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
  rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
  rmp->vector_rate =
    clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));

  vl_msg_api_send_shmem (q, (u8 *) & rmp);
}
/* Called by the memory-client infrastructure when an API client disconnects:
 * drop any stats registration the client left behind so the poller does not
 * keep sending to a dead queue. */
void
stats_memclnt_delete_callback (u32 client_index)
{
  vpe_client_stats_registration_t *rp;
  stats_main_t *sm = &stats_main;
  uword *p;

  p = hash_get (sm->stats_registration_hash, client_index);
  if (p)			/* client had an active registration */
    {
      rp = pool_elt_at_index (sm->stats_registrations, p[0]);
      pool_put (sm->stats_registrations, rp);
      hash_unset (sm->stats_registration_hash, client_index);
    }
}
1791 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
1792 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
1793 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
1794 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
1795 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
1796 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
1797 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
1798 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
1799 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
1800 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
1801 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
1802 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/* One-time init: wire up the stats message handlers, allocate the shared
 * data-structure lock, and mark the bulk counter messages as "bounce" so the
 * API infra does not free them (the stats thread reuses the buffers). */
static clib_error_t *
stats_init (vlib_main_t * vm)
{
  stats_main_t *sm = &stats_main;
  api_main_t *am = &api_main;
  void *vlib_worker_thread_bootstrap_fn (void *arg);

  /* NOTE(review): a couple of assignments here (e.g. sm->vlib_main = vm)
   * sit on lines lost from this chunk; reconstructed from later uses of
   * sm->vlib_main — confirm against the full file. */
  sm->vlib_main = vm;
  sm->vnet_main = vnet_get_main ();
  sm->interface_main = &vnet_get_main ()->interface_main;
  sm->stats_poll_interval_in_seconds = 10;	/* default poller period */
  sm->data_structure_lock =
    clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
			    CLIB_CACHE_LINE_BYTES);
  memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));

  /* Register one handler per entry in foreach_stats_msg (see file head). */
#define _(N,n)                                                  \
    vl_msg_api_set_handlers(VL_API_##N, #n,                     \
                           vl_api_##n##_t_handler,              \
                           vl_noop_handler,                     \
                           vl_api_##n##_t_endian,               \
                           vl_api_##n##_t_print,                \
                           sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
  foreach_stats_msg;
#undef _

  /* tell the msg infra not to free these messages... */
  am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
  am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
  am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
  am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
  am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
  am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;

  return 0;
}
1842 VLIB_INIT_FUNCTION (stats_init);
1845 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
1847 .function = stats_thread_fn,
1850 .no_data_structure_clone = 1,
1856 * fd.io coding-style-patch-verification: ON
1859 * eval: (c-set-style "gnu")