2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/dpo/load_balance.h>
23 stats_main_t stats_main;
25 #include <vnet/ip/ip.h>
27 #include <vpp/api/vpe_msg_enum.h>
30 #define f64_print(a,b)
32 #define vl_typedefs /* define message structures */
33 #include <vpp/api/vpe_all_api_h.h>
36 #define vl_endianfun /* define message structures */
37 #include <vpp/api/vpe_all_api_h.h>
40 /* instantiate all the print functions we know about */
41 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
43 #include <vpp/api/vpe_all_api_h.h>
46 #define foreach_stats_msg \
47 _(WANT_STATS, want_stats) \
48 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
49 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
50 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
51 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
52 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
53 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
54 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
55 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
56 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
57 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
58 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
59 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
60 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
63 #define vl_msg_name_crc_list
64 #include <vpp/stats/stats.api.h>
65 #undef vl_msg_name_crc_list
/* Register each stats API message's "<name>_<crc>" string with the API
 * message-name/CRC table so clients can resolve message IDs by name.
 * NOTE(review): this file extraction is missing interior lines of most
 * definitions (braces, blanks, some declarations are elided throughout). */
68 setup_message_id_table (api_main_t * am)
71 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
72 foreach_vl_msg_name_crc_stats;
76 /* These constants ensure msg sizes <= 1024, aka ring allocation */
77 #define SIMPLE_COUNTER_BATCH_SIZE 126
78 #define COMBINED_COUNTER_BATCH_SIZE 63
79 #define IP4_FIB_COUNTER_BATCH_SIZE 48
80 #define IP6_FIB_COUNTER_BATCH_SIZE 30
83 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* format() helper: render a VNET_INTERFACE_COMBINED_COUNTERS message as
 * "<interface>.<dir>.packets N" and "<interface>.<dir>.bytes N" lines,
 * one pair per interface starting at first_sw_if_index. Message fields
 * arrive in network byte order. */
87 format_vnet_interface_combined_counters (u8 * s, va_list * args)
89 stats_main_t *sm = &stats_main;
90 vl_api_vnet_interface_combined_counters_t *mp =
91 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
94 u32 count, sw_if_index;
96 count = ntohl (mp->count);
97 sw_if_index = ntohl (mp->first_sw_if_index);
101 vp = (vlib_counter_t *) mp->data;
/* Map the wire counter type to a printable direction name; anything
 * unrecognized prints as "bogus". */
103 switch (mp->vnet_counter_type)
105 case VNET_INTERFACE_COUNTER_RX:
108 case VNET_INTERFACE_COUNTER_TX:
112 counter_name = "bogus";
115 for (i = 0; i < count; i++)
/* mp->data is a packed byte array — use unaligned loads before swapping. */
117 packets = clib_mem_unaligned (&vp->packets, u64);
118 packets = clib_net_to_host_u64 (packets);
119 bytes = clib_mem_unaligned (&vp->bytes, u64);
120 bytes = clib_net_to_host_u64 (bytes);
122 s = format (s, "%U.%s.packets %lld\n",
123 format_vnet_sw_if_index_name,
124 sm->vnet_main, sw_if_index, counter_name, packets);
125 s = format (s, "%U.%s.bytes %lld\n",
126 format_vnet_sw_if_index_name,
127 sm->vnet_main, sw_if_index, counter_name, bytes);
/* format() helper: render a VNET_INTERFACE_SIMPLE_COUNTERS message as one
 * "<interface>.<counter> N" line per interface, starting at
 * first_sw_if_index. Values arrive in network byte order. */
134 format_vnet_interface_simple_counters (u8 * s, va_list * args)
136 stats_main_t *sm = &stats_main;
137 vl_api_vnet_interface_simple_counters_t *mp =
138 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
140 u32 count, sw_if_index;
141 count = ntohl (mp->count);
142 sw_if_index = ntohl (mp->first_sw_if_index);
144 vp = (u64 *) mp->data;
/* Translate the wire counter type into a printable name; unknown types
 * print as "bogus". */
147 switch (mp->vnet_counter_type)
149 case VNET_INTERFACE_COUNTER_DROP:
150 counter_name = "drop";
152 case VNET_INTERFACE_COUNTER_PUNT:
153 counter_name = "punt";
155 case VNET_INTERFACE_COUNTER_IP4:
156 counter_name = "ip4";
158 case VNET_INTERFACE_COUNTER_IP6:
159 counter_name = "ip6";
161 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
162 counter_name = "rx-no-buff";
164 case VNET_INTERFACE_COUNTER_RX_MISS:
165 counter_name = "rx-miss";
167 case VNET_INTERFACE_COUNTER_RX_ERROR:
168 counter_name = "rx-error (fifo-full)";
170 case VNET_INTERFACE_COUNTER_TX_ERROR:
171 counter_name = "tx-error (fifo-full)";
174 counter_name = "bogus";
177 for (i = 0; i < count; i++)
/* Unaligned load from the packed message body, then byte-swap. */
179 v = clib_mem_unaligned (vp, u64);
180 v = clib_net_to_host_u64 (v);
182 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
183 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the shared stats data-structure lock (spin on a test-and-set).
 * If this thread already holds the lock, the re-entry check at the
 * l->lock/thread_index test short-circuits the spin (recursive use). */
191 dslock (stats_main_t * sm, int release_hint, int tag)
194 data_structure_lock_t *l = sm->data_structure_lock;
196 if (PREDICT_FALSE (l == 0))
199 thread_index = vlib_get_thread_index ();
/* Recursive acquisition by the current owner — don't spin against ourselves. */
200 if (l->lock && l->thread_index == thread_index)
/* Spin until the atomic test-and-set wins the lock. */
209 while (__sync_lock_test_and_set (&l->lock, 1))
212 l->thread_index = thread_index;
/* Public wrapper: take the stats data-structure lock with a release hint
 * and debugging tag. */
217 stats_dslock_with_hint (int hint, int tag)
219 stats_main_t *sm = &stats_main;
220 dslock (sm, hint, tag);
/* Release the stats data-structure lock. The caller must be the current
 * owner (asserted); the memory barrier orders prior stores before the
 * lock word is cleared. */
224 dsunlock (stats_main_t * sm)
227 data_structure_lock_t *l = sm->data_structure_lock;
229 if (PREDICT_FALSE (l == 0))
232 thread_index = vlib_get_thread_index ();
233 ASSERT (l->lock && l->thread_index == thread_index);
239 CLIB_MEMORY_BARRIER ();
/* Public wrapper: release the stats data-structure lock. The hint/tag
 * parameters mirror stats_dslock_with_hint(). */
245 stats_dsunlock (int hint, int tag)
247 stats_main_t *sm = &stats_main;
/* Sweep every per-interface simple (u64) counter and ship the values to the
 * main thread as VNET_INTERFACE_SIMPLE_COUNTERS messages, batched at most
 * SIMPLE_COUNTER_BATCH_SIZE per message (keeps messages <= 1024 bytes,
 * the shmem ring allocation size). */
252 do_simple_interface_counters (stats_main_t * sm)
254 vl_api_vnet_interface_simple_counters_t *mp = 0;
255 vnet_interface_main_t *im = sm->interface_main;
256 api_main_t *am = sm->api_main;
257 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
258 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
259 vlib_simple_counter_main_t *cm;
260 u32 items_this_message = 0;
265 * Prevent interface registration from expanding / moving the vectors...
266 * That tends never to happen, so we can hold this lock for a while.
268 vnet_interface_counter_lock (im);
270 vec_foreach (cm, im->sw_if_counters)
272 n_counts = vlib_simple_counter_n_counters (cm);
273 for (i = 0; i < n_counts; i++)
277 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
280 mp = vl_msg_api_alloc_as_if_client
281 (sizeof (*mp) + items_this_message * sizeof (v));
282 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
/* Counter type is the index of cm within the sw_if_counters vector. */
283 mp->vnet_counter_type = cm - im->sw_if_counters;
284 mp->first_sw_if_index = htonl (i);
286 vp = (u64 *) mp->data;
288 v = vlib_get_simple_counter (cm, i);
/* Message body is packed — store unaligned, in network byte order. */
289 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
292 if (mp->count == items_this_message)
294 mp->count = htonl (items_this_message);
295 /* Send to the main thread... */
296 vl_msg_api_send_shmem (q, (u8 *) & mp);
302 vnet_interface_counter_unlock (im);
/* Sweep every per-interface combined (packets+bytes) counter and ship the
 * values to the main thread as VNET_INTERFACE_COMBINED_COUNTERS messages,
 * batched at most COMBINED_COUNTER_BATCH_SIZE per message. Mirrors
 * do_simple_interface_counters() for the two-field counter type. */
306 do_combined_interface_counters (stats_main_t * sm)
308 vl_api_vnet_interface_combined_counters_t *mp = 0;
309 vnet_interface_main_t *im = sm->interface_main;
310 api_main_t *am = sm->api_main;
311 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
312 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
313 vlib_combined_counter_main_t *cm;
314 u32 items_this_message = 0;
315 vlib_counter_t v, *vp = 0;
/* Hold the interface counter lock so registration can't move the vectors. */
318 vnet_interface_counter_lock (im);
320 vec_foreach (cm, im->combined_sw_if_counters)
322 n_counts = vlib_combined_counter_n_counters (cm);
323 for (i = 0; i < n_counts; i++)
327 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
330 mp = vl_msg_api_alloc_as_if_client
331 (sizeof (*mp) + items_this_message * sizeof (v));
332 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
/* Counter type is the index of cm within combined_sw_if_counters. */
333 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
334 mp->first_sw_if_index = htonl (i);
336 vp = (vlib_counter_t *) mp->data;
338 vlib_get_combined_counter (cm, i, &v);
/* Packed message body: unaligned stores, network byte order. */
339 clib_mem_unaligned (&vp->packets, u64)
340 = clib_host_to_net_u64 (v.packets);
341 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
344 if (mp->count == items_this_message)
346 mp->count = htonl (items_this_message);
347 /* Send to the main thread... */
348 vl_msg_api_send_shmem (q, (u8 *) & mp);
354 vnet_interface_counter_unlock (im);
/* Sleep for sec seconds + nsec nanoseconds between stats sweeps, using
 * nanosleep(2); warns on failure. (NOTE(review): the EINTR/remainder
 * handling lines are elided in this extraction.) */
358 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
360 struct timespec _req, *req = &_req;
361 struct timespec _rem, *rem = &_rem;
367 if (nanosleep (req, rem) == 0)
372 clib_unix_warning ("nanosleep");
/* Walk context: accumulates the non-zero IPv4 neighbour (adjacency)
 * counters for one interface until they are shipped by ip4_nbr_ship(). */
378 * @brief The context passed when collecting adjacency counters
380 typedef struct ip4_nbr_stats_ctx_t_
383 * The SW IF index all these adjs belong to
388 * A vector of ip4 nbr counters
390 vl_api_ip4_nbr_counter_t *counters;
391 } ip4_nbr_stats_ctx_t;
/* adj_nbr_walk() callback: if the adjacency has seen at least one packet,
 * append its combined counter (values converted to network byte order) to
 * the walk context's vector. Always continues the walk. */
394 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
396 vl_api_ip4_nbr_counter_t *vl_counter;
397 vlib_counter_t adj_counter;
398 ip4_nbr_stats_ctx_t *ctx;
402 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Only ship adjacencies that have actually carried traffic. */
404 if (0 != adj_counter.packets)
406 vec_add2 (ctx->counters, vl_counter, 1);
409 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
410 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
411 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
412 vl_counter->link_type = adj->ia_link;
414 return (ADJ_WALK_RC_CONTINUE);
417 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain ctx->counters into VL_API_VNET_IP4_NBR_COUNTERS messages of at most
 * IP4_FIB_COUNTER_BATCH_SIZE entries each. Counters are copied from the
 * TAIL of the vector so "consuming" a batch is just a length reset. If the
 * main thread's queue is full we pause; any leftover counters remain in the
 * context and are shipped on the next call. */
420 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
422 api_main_t *am = sm->api_main;
423 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
424 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
425 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
429 * If the walk context has counters, which may be left over from the last
430 * suspend, then we continue from there.
432 while (0 != vec_len (ctx->counters))
434 u32 n_items = MIN (vec_len (ctx->counters),
435 IP4_FIB_COUNTER_BATCH_SIZE);
/* Take the data-structure lock per batch so the control plane can run
 * between batches. */
438 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
440 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
443 (vl_api_ip4_nbr_counter_t)));
444 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
445 mp->count = ntohl (n_items);
446 mp->sw_if_index = ntohl (ctx->sw_if_index);
451 * copy the counters from the back of the context, then we can easily
452 * 'erase' them by resetting the vector length.
453 * The order we push the stats to the caller is not important.
456 &ctx->counters[vec_len (ctx->counters) - n_items],
457 n_items * sizeof (*ctx->counters));
459 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Check queue fullness under the queue lock, send no-lock, then decide
 * whether to pause and let the main thread drain. */
464 unix_shared_memory_queue_lock (q);
465 pause = unix_shared_memory_queue_is_full (q);
467 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
468 unix_shared_memory_queue_unlock (q);
472 ip46_fib_stats_delay (sm, 0 /* sec */ ,
473 STATS_RELEASE_DELAY_NS);
/* For each software interface, walk its IPv4 neighbour adjacencies under
 * the data-structure lock, collect non-zero counters via ip4_nbr_stats_cb,
 * and ship them with ip4_nbr_ship(). */
478 do_ip4_nbrs (stats_main_t * sm)
480 vnet_main_t *vnm = vnet_get_main ();
481 vnet_interface_main_t *im = &vnm->interface_main;
482 vnet_sw_interface_t *si;
484 ip4_nbr_stats_ctx_t ctx = {
490 pool_foreach (si, im->sw_interfaces,
493 * update the interface we are now concerned with
495 ctx.sw_if_index = si->sw_if_index;
498 * we are about to walk another interface, so we shouldn't have any pending
501 ASSERT(ctx.counters == NULL);
504 * visit each neighbour adjacency on the interface and collect
506 * Because we hold the lock the walk is synchronous, so safe to routing
507 * updates. It's limited in work by the number of adjacenies on an
508 * interface, which is typically not huge.
510 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
511 adj_nbr_walk (si->sw_if_index,
518 * if this interface has some adjacencies with counters then ship them,
519 * else continue to the next interface.
521 if (NULL != ctx.counters)
523 ip4_nbr_ship(sm, &ctx);
/* Walk context: accumulates the non-zero IPv6 neighbour (adjacency)
 * counters for one interface until they are shipped by ip6_nbr_ship(). */
530 * @brief The context passed when collecting adjacency counters
532 typedef struct ip6_nbr_stats_ctx_t_
535 * The SW IF index all these adjs belong to
540 * A vector of ip6 nbr counters
542 vl_api_ip6_nbr_counter_t *counters;
543 } ip6_nbr_stats_ctx_t;
/* adj_nbr_walk() callback (IPv6 twin of ip4_nbr_stats_cb): append this
 * adjacency's combined counter to the context vector when it has carried
 * at least one packet. Always continues the walk. */
546 ip6_nbr_stats_cb (adj_index_t ai,
549 vl_api_ip6_nbr_counter_t *vl_counter;
550 vlib_counter_t adj_counter;
551 ip6_nbr_stats_ctx_t *ctx;
555 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies. */
557 if (0 != adj_counter.packets)
559 vec_add2(ctx->counters, vl_counter, 1);
562 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
563 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
564 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
565 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
566 vl_counter->link_type = adj->ia_link;
568 return (ADJ_WALK_RC_CONTINUE);
571 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain ctx->counters into VL_API_VNET_IP6_NBR_COUNTERS messages of at most
 * IP6_FIB_COUNTER_BATCH_SIZE entries each (IPv6 twin of ip4_nbr_ship).
 * Batches are taken from the tail of the vector; if the main thread's queue
 * is full, pause and resume on the next call. */
574 ip6_nbr_ship (stats_main_t * sm,
575 ip6_nbr_stats_ctx_t *ctx)
577 api_main_t *am = sm->api_main;
578 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
579 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
580 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
584 * If the walk context has counters, which may be left over from the last
585 * suspend, then we continue from there.
587 while (0 != vec_len(ctx->counters))
589 u32 n_items = MIN (vec_len (ctx->counters),
590 IP6_FIB_COUNTER_BATCH_SIZE);
/* Per-batch lock so the control plane can interleave. */
593 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
595 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
598 (vl_api_ip6_nbr_counter_t)));
599 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
600 mp->count = ntohl (n_items);
601 mp->sw_if_index = ntohl (ctx->sw_if_index);
606 * copy the counters from the back of the context, then we can easily
607 * 'erase' them by resetting the vector length.
608 * The order we push the stats to the caller is not important.
611 &ctx->counters[vec_len (ctx->counters) - n_items],
612 n_items * sizeof (*ctx->counters));
614 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Test fullness under the queue lock, send no-lock, pause if needed. */
619 unix_shared_memory_queue_lock (q);
620 pause = unix_shared_memory_queue_is_full (q);
622 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
623 unix_shared_memory_queue_unlock (q);
627 ip46_fib_stats_delay (sm, 0 /* sec */ ,
628 STATS_RELEASE_DELAY_NS);
/* For each software interface, walk its IPv6 neighbour adjacencies under
 * the data-structure lock and ship any non-zero counters (IPv6 twin of
 * do_ip4_nbrs). */
633 do_ip6_nbrs (stats_main_t * sm)
635 vnet_main_t *vnm = vnet_get_main ();
636 vnet_interface_main_t *im = &vnm->interface_main;
637 vnet_sw_interface_t *si;
639 ip6_nbr_stats_ctx_t ctx = {
645 pool_foreach (si, im->sw_interfaces,
648 * update the interface we are now concerned with
650 ctx.sw_if_index = si->sw_if_index;
653 * we are about to walk another interface, so we shouldn't have any pending
656 ASSERT(ctx.counters == NULL);
659 * visit each neighbour adjacency on the interface and collect
661 * Because we hold the lock the walk is synchronous, so safe to routing
662 * updates. It's limited in work by the number of adjacenies on an
663 * interface, which is typically not huge.
665 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
666 adj_nbr_walk (si->sw_if_index,
673 * if this interface has some adjacencies with counters then ship them,
674 * else continue to the next interface.
676 if (NULL != ctx.counters)
678 ip6_nbr_ship(sm, &ctx);
/* Walk every IPv4 FIB: snapshot its routes under the data-structure lock,
 * look up each route's load-balance counter, and send non-zero entries to
 * the main thread in VL_API_VNET_IP4_FIB_COUNTERS messages of at most
 * IP4_FIB_COUNTER_BATCH_SIZE entries. Whenever the control plane raises
 * release_hint, the lock is dropped, we sleep STATS_RELEASE_DELAY_NS, and
 * restart from the current FIB (start_at_fib_index). */
685 do_ip4_fibs (stats_main_t * sm)
687 ip4_main_t *im4 = &ip4_main;
688 api_main_t *am = sm->api_main;
689 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
690 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
694 do_ip46_fibs_t *do_fibs;
695 vl_api_vnet_ip4_fib_counters_t *mp = 0;
696 u32 items_this_message;
697 vl_api_ip4_fib_counter_t *ctrp = 0;
698 u32 start_at_fib_index = 0;
701 do_fibs = &sm->do_ip46_fibs;
/* Snapshot the FIB pool into a plain vector so the walk below doesn't
 * iterate a pool that could change shape. */
704 vec_reset_length (do_fibs->fibs);
706 pool_foreach (fib, im4->fibs,
707 ({vec_add1(do_fibs->fibs,fib);}));
711 for (j = 0; j < vec_len (do_fibs->fibs); j++)
713 fib = do_fibs->fibs[j];
714 /* We may have bailed out due to control-plane activity */
715 while ((fib - im4->fibs) < start_at_fib_index)
718 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
722 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
723 mp = vl_msg_api_alloc_as_if_client
725 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
726 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
728 mp->vrf_id = ntohl (fib->ft_table_id);
729 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
733 /* happens if the last FIB was empty... */
734 ASSERT (mp->count == 0);
735 mp->vrf_id = ntohl (fib->ft_table_id);
738 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
740 vec_reset_length (do_fibs->ip4routes);
741 vec_reset_length (do_fibs->results);
/* Snapshot this FIB's routes: one hash table per prefix length. */
743 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
745 uword *hash = v4_fib->fib_entry_by_dst_address[i];
749 vec_reset_length (do_fibs->pvec);
751 x.address_length = i;
753 hash_foreach_pair (p, hash, (
755 vec_add1 (do_fibs->pvec, p);}
757 for (k = 0; k < vec_len (do_fibs->pvec); k++)
759 p = do_fibs->pvec[k];
760 x.address.data_u32 = p->key;
761 x.index = p->value[0];
763 vec_add1 (do_fibs->ip4routes, x);
/* Control plane wants the lock: remember where we were, sleep, retry. */
764 if (sm->data_structure_lock->release_hint)
766 start_at_fib_index = fib - im4->fibs;
768 ip46_fib_stats_delay (sm, 0 /* sec */ ,
769 STATS_RELEASE_DELAY_NS);
771 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
/* Second pass: fetch each snapshotted route's counter and ship it. */
777 vec_foreach (r, do_fibs->ip4routes)
780 const dpo_id_t *dpo_id;
783 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
784 index = (u32) dpo_id->dpoi_index;
786 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
790 * seen at least one packet, send it.
795 /* already in net byte order */
796 ctrp->address = r->address.as_u32;
797 ctrp->address_length = r->address_length;
798 ctrp->packets = clib_host_to_net_u64 (c.packets);
799 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
803 if (mp->count == items_this_message)
805 mp->count = htonl (items_this_message);
807 * If the main thread's input queue is stuffed,
808 * drop the data structure lock (which the main thread
809 * may want), and take a pause.
811 unix_shared_memory_queue_lock (q);
812 if (unix_shared_memory_queue_is_full (q))
815 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
816 unix_shared_memory_queue_unlock (q);
818 ip46_fib_stats_delay (sm, 0 /* sec */ ,
819 STATS_RELEASE_DELAY_NS);
822 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
823 unix_shared_memory_queue_unlock (q);
/* Start a fresh message for the remaining routes. */
825 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
826 mp = vl_msg_api_alloc_as_if_client
828 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
829 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
831 mp->vrf_id = ntohl (fib->ft_table_id);
832 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
834 } /* for each (mp or single) adj */
835 if (sm->data_structure_lock->release_hint)
837 start_at_fib_index = fib - im4->fibs;
839 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
841 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
844 } /* vec_foreach (routes) */
848 /* Flush any data from this fib */
851 mp->count = htonl (mp->count);
852 vl_msg_api_send_shmem (q, (u8 *) & mp);
857 /* If e.g. the last FIB had no reportable routes, free the buffer */
859 vl_msg_api_free (mp);
/* Argument block for the add_routes_in_fib() bihash walk: the output route
 * vector plus (per its usage in the callback) the FIB index being
 * collected and the stats main for release-hint checks. The struct's
 * opening lines are elided in this extraction. */
865 ip6_route_t **routep;
867 } add_routes_in_fib_arg_t;
/* clib_bihash walk callback: collect every forwarding entry that belongs to
 * ap->fib_index into *ap->routep. The FIB index lives in the high 32 bits
 * of key[2], the prefix length in its low byte. If the control plane wants
 * the data-structure lock back, bail out of the whole walk via longjmp to
 * the setjmp in do_ip6_fibs(). */
870 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
872 add_routes_in_fib_arg_t *ap = arg;
873 stats_main_t *sm = ap->sm;
875 if (sm->data_structure_lock->release_hint)
876 clib_longjmp (&sm->jmp_buf, 1);
878 if (kvp->key[2] >> 32 == ap->fib_index)
882 addr = (ip6_address_t *) kvp;
883 vec_add2 (*ap->routep, r, 1);
884 r->address = addr[0];
885 r->address_length = kvp->key[2] & 0xFF;
886 r->index = kvp->value;
/* Walk every IPv6 FIB (IPv6 twin of do_ip4_fibs): snapshot routes from the
 * forwarding bihash under the data-structure lock — using setjmp/longjmp so
 * add_routes_in_fib() can abort mid-walk when the control plane raises
 * release_hint — then send non-zero per-prefix counters to the main thread
 * in VL_API_VNET_IP6_FIB_COUNTERS batches of IP6_FIB_COUNTER_BATCH_SIZE. */
891 do_ip6_fibs (stats_main_t * sm)
893 ip6_main_t *im6 = &ip6_main;
894 api_main_t *am = sm->api_main;
895 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
896 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
899 do_ip46_fibs_t *do_fibs;
900 vl_api_vnet_ip6_fib_counters_t *mp = 0;
901 u32 items_this_message;
902 vl_api_ip6_fib_counter_t *ctrp = 0;
903 u32 start_at_fib_index = 0;
904 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
905 add_routes_in_fib_arg_t _a, *a = &_a;
908 do_fibs = &sm->do_ip46_fibs;
/* Snapshot the FIB pool into a plain vector before walking. */
910 vec_reset_length (do_fibs->fibs);
912 pool_foreach (fib, im6->fibs,
913 ({vec_add1(do_fibs->fibs,fib);}));
917 for (i = 0; i < vec_len (do_fibs->fibs); i++)
919 fib = do_fibs->fibs[i];
920 /* We may have bailed out due to control-plane activity */
921 while ((fib - im6->fibs) < start_at_fib_index)
926 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
927 mp = vl_msg_api_alloc_as_if_client
929 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
930 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
932 mp->vrf_id = ntohl (fib->ft_table_id);
933 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
936 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
938 vec_reset_length (do_fibs->ip6routes);
939 vec_reset_length (do_fibs->results);
941 a->fib_index = fib - im6->fibs;
942 a->routep = &do_fibs->ip6routes;
/* setjmp returns 0 on the direct call; a longjmp from the walk callback
 * (release_hint) lands here with a non-zero value so we sleep and retry
 * from the current FIB. */
945 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
947 start_at_fib_index = fib - im6->fibs;
948 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
953 ip46_fib_stats_delay (sm, 0 /* sec */ ,
954 STATS_RELEASE_DELAY_NS);
956 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Second pass: fetch each snapshotted route's counter and ship it. */
960 vec_foreach (r, do_fibs->ip6routes)
964 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
968 * seen at least one packet, send it.
972 /* already in net byte order */
973 ctrp->address[0] = r->address.as_u64[0];
974 ctrp->address[1] = r->address.as_u64[1];
975 ctrp->address_length = (u8) r->address_length;
976 ctrp->packets = clib_host_to_net_u64 (c.packets);
977 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
981 if (mp->count == items_this_message)
983 mp->count = htonl (items_this_message);
985 * If the main thread's input queue is stuffed,
986 * drop the data structure lock (which the main thread
987 * may want), and take a pause.
989 unix_shared_memory_queue_lock (q);
990 if (unix_shared_memory_queue_is_full (q))
993 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
994 unix_shared_memory_queue_unlock (q);
996 ip46_fib_stats_delay (sm, 0 /* sec */ ,
997 STATS_RELEASE_DELAY_NS);
1000 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1001 unix_shared_memory_queue_unlock (q);
/* Start a fresh message for the remaining routes. */
1003 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1004 mp = vl_msg_api_alloc_as_if_client
1006 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1007 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1009 mp->vrf_id = ntohl (fib->ft_table_id);
1010 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1014 if (sm->data_structure_lock->release_hint)
1016 start_at_fib_index = fib - im6->fibs;
1018 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1020 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1023 } /* vec_foreach (routes) */
1027 /* Flush any data from this fib */
1030 mp->count = htonl (mp->count);
1031 vl_msg_api_send_shmem (q, (u8 *) & mp);
1036 /* If e.g. the last FIB had no reportable routes, free the buffer */
1038 vl_msg_api_free (mp);
/* Stats pthread entry point: block all signals, name the thread
 * "<prefix>_stats", switch to the thread's own heap, then loop on a
 * 10-second poll interval collecting counters. Collection is gated on
 * sm->enable_poller, i.e. at least one client registration being active. */
1042 stats_thread_fn (void *arg)
1044 stats_main_t *sm = &stats_main;
1045 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
1046 vlib_thread_main_t *tm = vlib_get_thread_main ();
1048 /* stats thread wants no signals. */
1052 pthread_sigmask (SIG_SETMASK, &s, 0);
1055 if (vec_len (tm->thread_prefix))
1056 vlib_set_thread_name ((char *)
1057 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
1059 clib_mem_set_heap (w->thread_mheap);
1063 /* 10 second poll interval */
1064 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
/* Skip the sweep entirely when no client has registered for stats. */
1066 if (!(sm->enable_poller))
1068 do_simple_interface_counters (sm);
1069 do_combined_interface_counters (sm);
/* Main-thread handler for VNET_INTERFACE_SIMPLE_COUNTERS messages produced
 * by the stats thread: relay the message to every client registered for
 * INTERFACE_SIMPLE_COUNTERS. Uses the send-to-previous / copy-for-next
 * pattern so the message is duplicated only when more than one recipient
 * remains; clients whose queues are full are skipped; the buffer is freed
 * when no recipient takes ownership. */
1078 vl_api_vnet_interface_simple_counters_t_handler
1079 (vl_api_vnet_interface_simple_counters_t * mp)
1081 vpe_client_stats_registration_t *reg;
1082 stats_main_t *sm = &stats_main;
1083 unix_shared_memory_queue_t *q, *q_prev = NULL;
1084 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
1088 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
/* Snapshot the registration pool into a vector before iterating. */
1091 vec_reset_length(sm->regs);
1092 pool_foreach(reg, sm->stats_registrations,
1094 vec_add1(sm->regs,reg);
1097 for (i = 0; i < vec_len (sm->regs); i++)
1100 if (reg->stats_registrations & INTERFACE_SIMPLE_COUNTERS)
1102 q = vl_api_client_index_to_input_queue (reg->client.client_index);
/* A previous recipient exists with queue space: clone for the next
 * client and send the current buffer to the previous one. */
1105 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1107 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
1108 clib_memcpy (mp_copy, mp, mp_size);
1109 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1117 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
/* Final recipient (if any) gets the original buffer. */
1120 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1122 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1126 vl_msg_api_free (mp);
/* Main-thread handler: relay VNET_INTERFACE_COMBINED_COUNTERS messages to
 * every client registered for INTERFACE_COMBINED_COUNTERS, using the same
 * send-to-previous / copy-for-next pattern as the simple-counters handler. */
1131 vl_api_vnet_interface_combined_counters_t_handler
1132 (vl_api_vnet_interface_combined_counters_t * mp)
1134 vpe_client_stats_registration_t *reg;
1135 stats_main_t *sm = &stats_main;
1136 unix_shared_memory_queue_t *q, *q_prev = NULL;
1137 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
1140 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
1143 pool_foreach(reg, sm->stats_registrations,
1145 if (reg->stats_registrations & INTERFACE_COMBINED_COUNTERS)
1147 q = vl_api_client_index_to_input_queue (reg->client.client_index);
/* Clone for the next recipient, send the buffer to the previous one. */
1150 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1152 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1153 clib_memcpy(mp_copy, mp, mp_size);
1154 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
1164 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
/* Final recipient gets the original buffer; otherwise free it. */
1167 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1169 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1173 vl_msg_api_free (mp);
/* Main-thread handler: relay VNET_IP4_FIB_COUNTERS messages to every client
 * registered for IP4_FIB_COUNTERS (send-to-previous / copy-for-next). */
1178 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
1180 vpe_client_stats_registration_t *reg;
1181 stats_main_t *sm = &stats_main;
1182 unix_shared_memory_queue_t *q, *q_prev = NULL;
1183 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
1186 mp_size = sizeof (*mp_copy) +
1187 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
1190 pool_foreach(reg, sm->stats_registrations,
1192 if (reg->stats_registrations & IP4_FIB_COUNTERS)
1194 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1197 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1199 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1200 clib_memcpy(mp_copy, mp, mp_size);
1201 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Final recipient gets the original buffer; otherwise free it. */
1209 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1211 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1215 vl_msg_api_free (mp);
/* Main-thread handler: relay VNET_IP4_NBR_COUNTERS messages to every client
 * registered for IP4_NBR_COUNTERS (send-to-previous / copy-for-next). */
1220 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
1222 vpe_client_stats_registration_t *reg;
1223 stats_main_t *sm = &stats_main;
1224 unix_shared_memory_queue_t *q, *q_prev = NULL;
1225 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
1228 mp_size = sizeof (*mp_copy) +
1229 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
1232 pool_foreach(reg, sm->stats_registrations,
1234 if (reg->stats_registrations & IP4_NBR_COUNTERS)
1236 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1239 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1241 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1242 clib_memcpy(mp_copy, mp, mp_size);
1243 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Final recipient gets the original buffer; otherwise free it. */
1251 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1253 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1257 vl_msg_api_free (mp);
/* Main-thread handler: relay VNET_IP6_FIB_COUNTERS messages to every client
 * registered for IP6_FIB_COUNTERS (send-to-previous / copy-for-next). */
1262 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
1264 vpe_client_stats_registration_t *reg;
1265 stats_main_t *sm = &stats_main;
1266 unix_shared_memory_queue_t *q, *q_prev = NULL;
1267 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
1270 mp_size = sizeof (*mp_copy) +
1271 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
1274 pool_foreach(reg, sm->stats_registrations,
1276 if (reg->stats_registrations & IP6_FIB_COUNTERS)
1278 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1281 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1283 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1284 clib_memcpy(mp_copy, mp, mp_size);
1285 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Final recipient gets the original buffer; otherwise free it. */
1293 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1295 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1299 vl_msg_api_free (mp);
/* Main-thread handler: relay VNET_IP6_NBR_COUNTERS messages to every client
 * registered for IP6_NBR_COUNTERS (send-to-previous / copy-for-next). */
1304 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
1306 vpe_client_stats_registration_t *reg;
1307 stats_main_t *sm = &stats_main;
1308 unix_shared_memory_queue_t *q, *q_prev = NULL;
1309 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
1312 mp_size = sizeof (*mp_copy) +
1313 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
1316 pool_foreach(reg, sm->stats_registrations,
1318 if (reg->stats_registrations & IP6_NBR_COUNTERS)
1320 q = vl_api_client_index_to_input_queue (reg->client.client_index);
1323 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1325 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1326 clib_memcpy(mp_copy, mp, mp_size);
1327 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Final recipient gets the original buffer; otherwise free it. */
1335 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1337 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1341 vl_msg_api_free (mp);
/* Handler for WANT_STATS: enable or disable delivery of ALL stats classes
 * for the requesting client. Enable creates a registration with every
 * counter flag set; disable removes the registration entirely. The global
 * poller is toggled on whether any registrations remain, then a
 * WANT_STATS_REPLY is sent back. */
1346 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
1348 stats_main_t *sm = &stats_main;
1349 vpe_client_stats_registration_t *rp;
1350 vl_api_want_stats_reply_t *rmp;
1353 unix_shared_memory_queue_t *q;
1355 p = hash_get (sm->stats_registration_hash, mp->client_index);
/* Client already known: enabling again is a no-op warning; otherwise
 * this is a disable — drop the registration. */
1358 if (mp->enable_disable)
1360 clib_warning ("pid %d: already enabled...", mp->pid);
1366 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1367 pool_put (sm->stats_registrations, rp);
1368 hash_unset (sm->stats_registration_hash, mp->client_index);
1372 if (mp->enable_disable == 0)
1374 clib_warning ("pid %d: already disabled...", mp->pid);
/* New enable: register the client for every stats class. */
1378 pool_get (sm->stats_registrations, rp);
1379 rp->client.client_index = mp->client_index;
1380 rp->client.client_pid = mp->pid;
1381 rp->stats_registrations |= INTERFACE_SIMPLE_COUNTERS;
1382 rp->stats_registrations |= INTERFACE_COMBINED_COUNTERS;
1383 rp->stats_registrations |= IP4_FIB_COUNTERS;
1384 rp->stats_registrations |= IP4_NBR_COUNTERS;
1385 rp->stats_registrations |= IP6_FIB_COUNTERS;
1386 rp->stats_registrations |= IP6_NBR_COUNTERS;
1388 hash_set (sm->stats_registration_hash, rp->client.client_index,
1389 rp - sm->stats_registrations);
/* Run the poller iff at least one client is registered. */
1392 if (pool_elts (sm->stats_registrations))
1393 sm->enable_poller = 1;
1395 sm->enable_poller = 0;
1397 q = vl_api_client_index_to_input_queue (mp->client_index);
1402 rmp = vl_msg_api_alloc (sizeof (*rmp));
1403 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
1404 rmp->context = mp->context;
1405 rmp->retval = retval;
1407 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/* Handler for WANT_INTERFACE_SIMPLE_STATS: set or clear just the
 * INTERFACE_SIMPLE_COUNTERS flag on the client's registration, creating or
 * destroying the registration as needed, then reply. */
1411 vl_api_want_interface_simple_stats_t_handler
1412 (vl_api_want_interface_simple_stats_t * mp)
1414 stats_main_t *sm = &stats_main;
1415 vpe_client_stats_registration_t *rp;
1416 vl_api_want_interface_simple_stats_reply_t *rmp;
1419 unix_shared_memory_queue_t *q;
1421 p = hash_get (sm->stats_registration_hash, mp->client_index);
1424 if (mp->enable_disable == 0)
1426 if (!p) // No client to disable
1428 clib_warning ("pid %d: already disabled for stats...", mp->pid);
1433 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
/* NOTE(review): operator-precedence bug — '!' binds before '&', so this
 * evaluates (!rp->stats_registrations) & INTERFACE_SIMPLE_COUNTERS rather
 * than testing the flag. Compare the correctly parenthesized check in the
 * combined-stats handler below; this should read
 * !(rp->stats_registrations & INTERFACE_SIMPLE_COUNTERS). */
1434 if (!rp->stats_registrations & INTERFACE_SIMPLE_COUNTERS) // Client but doesn't want this.
1437 ("pid %d: already disabled for interface simple stats...",
1444 rp->stats_registrations &= ~(INTERFACE_SIMPLE_COUNTERS); // Clear flag
1445 if (rp->stats_registrations == 0) // Client isn't listening to anything else
1447 pool_put (sm->stats_registrations, rp);
1448 hash_unset (sm->stats_registration_hash, mp->client_index);
1454 /* Get client from pool */
/* NOTE(review): the guard for p == NULL before this dereference appears to
 * be elided in this extraction — confirm against the full source. */
1456 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1458 if (!p || !rp) // Doesn't exist, make a new entry
1460 pool_get (sm->stats_registrations, rp);
1461 rp->client.client_index = mp->client_index;
1462 rp->client.client_pid = mp->pid;
1464 rp->stats_registrations |= INTERFACE_SIMPLE_COUNTERS;
1465 hash_set (sm->stats_registration_hash, rp->client.client_index,
1466 rp - sm->stats_registrations);
1469 if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now.
1470 sm->enable_poller = 1;
1472 sm->enable_poller = 0;
1474 q = vl_api_client_index_to_input_queue (mp->client_index);
1479 rmp = vl_msg_api_alloc (sizeof (*rmp));
1480 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
1481 rmp->context = mp->context;
1482 rmp->retval = retval;
1484 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/* Handler for WANT_INTERFACE_COMBINED_STATS: (un)registers the calling API
 * client for the periodic combined (packets+bytes) interface counter stream.
 * Same registration-pool / hash pattern as the simple-stats handler above.
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers); comments describe only the visible code. */
1488 vl_api_want_interface_combined_stats_t_handler
1489 (vl_api_want_interface_combined_stats_t * mp)
1491 stats_main_t *sm = &stats_main;
1492 vpe_client_stats_registration_t *rp;
1493 vl_api_want_interface_combined_stats_reply_t *rmp;
1496 unix_shared_memory_queue_t *q;
/* Look up any existing registration for this client index. */
1498 p = hash_get (sm->stats_registration_hash, mp->client_index);
1501 if (mp->enable_disable == 0)
1503 if (!p) // No client to disable
1505 clib_warning ("pid %d: already disabled for stats...", mp->pid);
1510 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1511 if (!(rp->stats_registrations & INTERFACE_COMBINED_COUNTERS)) // Client but doesn't want this.
1514 ("pid %d: already disabled for interface COMBINED stats...",
1521 rp->stats_registrations &= ~(INTERFACE_COMBINED_COUNTERS); // Clear flag
1522 if (rp->stats_registrations == 0) // Client isn't listening to anything else
1524 pool_put (sm->stats_registrations, rp);
1525 hash_unset (sm->stats_registration_hash, mp->client_index);
1531 /* Get client from pool */
1533 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1535 if (!p || !rp) // Doesn't exist, make a new entry
1537 pool_get (sm->stats_registrations, rp);
1538 rp->client.client_index = mp->client_index;
1539 rp->client.client_pid = mp->pid;
1541 rp->stats_registrations |= INTERFACE_COMBINED_COUNTERS;
1542 hash_set (sm->stats_registration_hash, rp->client.client_index,
1543 rp - sm->stats_registrations);
/* Any registration at all keeps the global poller running. */
1546 if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now.
1547 sm->enable_poller = 1;
1549 sm->enable_poller = 0;
1551 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Build and send the reply over shared memory. */
1556 rmp = vl_msg_api_alloc (sizeof (*rmp));
1557 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
1558 rmp->context = mp->context;
1559 rmp->retval = retval;
1561 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/* Handler for WANT_IP4_FIB_STATS: (un)registers the calling API client for
 * the periodic IPv4 FIB counter stream. Same registration-pool / hash
 * pattern as the interface-stats handlers above.
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers); comments describe only the visible code. */
1565 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
1567 stats_main_t *sm = &stats_main;
1568 vpe_client_stats_registration_t *rp;
1569 vl_api_want_ip4_fib_stats_reply_t *rmp;
1572 unix_shared_memory_queue_t *q;
/* Look up any existing registration for this client index. */
1574 p = hash_get (sm->stats_registration_hash, mp->client_index);
1578 $$$ FIXME: need std return codes. Still undecided if enabling already
1579 enabled (and similar for disabled) is really a -'ve error condition or
1582 if (mp->enable_disable == 0)
1584 if (!p) // No client to disable
1586 clib_warning ("pid %d: already disabled for stats...", mp->pid);
1591 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1592 if (!(rp->stats_registrations & IP4_FIB_COUNTERS)) // Client but doesn't want this.
1594 clib_warning ("pid %d: already disabled for interface ip4 fib...",
1601 rp->stats_registrations &= ~(IP4_FIB_COUNTERS); // Clear flag
1602 if (rp->stats_registrations == 0) // Client isn't listening to anything else
1604 pool_put (sm->stats_registrations, rp);
1605 hash_unset (sm->stats_registration_hash, mp->client_index);
1611 /* Get client from pool */
1613 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1615 if (!p || !rp) // Doesn't exist, make a new entry
1617 pool_get (sm->stats_registrations, rp);
1618 rp->client.client_index = mp->client_index;
1619 rp->client.client_pid = mp->pid;
1621 rp->stats_registrations |= IP4_FIB_COUNTERS;
1622 hash_set (sm->stats_registration_hash, rp->client.client_index,
1623 rp - sm->stats_registrations);
/* Any registration at all keeps the global poller running. */
1626 if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now.
1627 sm->enable_poller = 1;
1629 sm->enable_poller = 0;
1631 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Build and send the reply over shared memory. */
1636 rmp = vl_msg_api_alloc (sizeof (*rmp));
1637 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
1638 rmp->context = mp->context;
1639 rmp->retval = retval;
1641 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/* Handler for WANT_IP6_FIB_STATS: (un)registers the calling API client for
 * the periodic IPv6 FIB counter stream. Mirrors the IPv4 FIB handler.
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers); comments describe only the visible code. */
1645 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
1647 stats_main_t *sm = &stats_main;
1648 vpe_client_stats_registration_t *rp;
1649 vl_api_want_ip6_fib_stats_reply_t *rmp;
1652 unix_shared_memory_queue_t *q;
/* Look up any existing registration for this client index. */
1654 p = hash_get (sm->stats_registration_hash, mp->client_index);
1658 $$$ FIXME: need std return codes. Still undecided if enabling already
1659 enabled (and similar for disabled) is really a -'ve error condition or
1662 if (mp->enable_disable == 0)
1664 if (!p) // No client to disable
1666 clib_warning ("pid %d: already disabled for stats...", mp->pid);
1671 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1672 if (!(rp->stats_registrations & IP6_FIB_COUNTERS)) // Client but doesn't want this.
1674 clib_warning ("pid %d: already disabled for interface ip6 fib...",
1681 rp->stats_registrations &= ~(IP6_FIB_COUNTERS); // Clear flag
1682 if (rp->stats_registrations == 0) // Client isn't listening to anything else
1684 pool_put (sm->stats_registrations, rp);
1685 hash_unset (sm->stats_registration_hash, mp->client_index);
1691 /* Get client from pool */
1693 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1695 if (!p || !rp) // Doesn't exist, make a new entry
1697 pool_get (sm->stats_registrations, rp);
1698 rp->client.client_index = mp->client_index;
1699 rp->client.client_pid = mp->pid;
1701 rp->stats_registrations |= IP6_FIB_COUNTERS;
1702 hash_set (sm->stats_registration_hash, rp->client.client_index,
1703 rp - sm->stats_registrations);
/* Any registration at all keeps the global poller running. */
1706 if (pool_elts (sm->stats_registrations)) // Someone wants something, somewhere so enable globally for now.
1707 sm->enable_poller = 1;
1709 sm->enable_poller = 0;
1711 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Build and send the reply over shared memory. */
1716 rmp = vl_msg_api_alloc (sizeof (*rmp));
1717 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
1718 rmp->context = mp->context;
1719 rmp->retval = retval;
1721 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/* Neighbour (NBR) stats want-handlers: deliberately stubbed out per the
 * FIXME below. Bodies fall in elided lines of this listing and are not
 * visible here — presumably empty no-ops; confirm against full source. */
1724 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
1726 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
1731 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/* Handler for VNET_GET_SUMMARY_STATS: sums the combined (packets+bytes)
 * counters over every software interface, for RX and TX, and returns the
 * totals plus the last vector length per node in a single reply message.
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers); comments describe only the visible code. */
1736 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
1738 stats_main_t *sm = &stats_main;
1739 vnet_interface_main_t *im = sm->interface_main;
1740 vl_api_vnet_get_summary_stats_reply_t *rmp;
1741 vlib_combined_counter_main_t *cm;
1744 u64 total_pkts[VLIB_N_RX_TX];
1745 u64 total_bytes[VLIB_N_RX_TX];
1747 unix_shared_memory_queue_t *q =
1748 vl_api_client_index_to_input_queue (mp->client_index);
1753 rmp = vl_msg_api_alloc (sizeof (*rmp));
1754 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
1755 rmp->context = mp->context;
1758 memset (total_pkts, 0, sizeof (total_pkts));
1759 memset (total_bytes, 0, sizeof (total_bytes));
/* Hold the counter lock while reading so the poller can't race us. */
1761 vnet_interface_counter_lock (im);
1763 vec_foreach (cm, im->combined_sw_if_counters)
/* 'which' is the counter index (RX or TX) derived from vector position. */
1765 which = cm - im->combined_sw_if_counters;
1767 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
1769 vlib_get_combined_counter (cm, i, &v);
1770 total_pkts[which] += v.packets;
1771 total_bytes[which] += v.bytes;
1774 vnet_interface_counter_unlock (im);
/* Totals go on the wire in network byte order. */
1776 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
1777 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
1778 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
1779 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
1781 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
1783 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/* Called when an API client disconnects: drops the client's stats
 * registration from the pool and hash so the poller stops sending to it.
 * NOTE(review): a guard on 'p' (hash miss) presumably sits in an elided
 * line before the pool_elt_at_index — confirm against full source. */
1787 stats_memclnt_delete_callback (u32 client_index)
1789 vpe_client_stats_registration_t *rp;
1790 stats_main_t *sm = &stats_main;
1793 p = hash_get (sm->stats_registration_hash, client_index);
1796 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1797 pool_put (sm->stats_registrations, rp);
1798 hash_unset (sm->stats_registration_hash, client_index);
/* Suppress the auto-generated endian-swap and print handlers for the bulk
 * counter messages by mapping them to vl_noop_handler; these messages are
 * handled specially (see the message_bounce setup in stats_init below,
 * which tells the API infra not to free them either). */
1804 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
1805 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
1806 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
1807 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
1808 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
1809 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
1810 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
1811 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
1812 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
1813 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
1814 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
1815 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/* One-time init for the stats subsystem: caches main-struct pointers, sets
 * the default poll interval, allocates the data-structure lock, registers
 * all stats API message handlers, marks the bulk counter messages as
 * no-free ("bounce"), and installs the (name, crc, id) message table.
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers); comments describe only the visible code. */
1817 static clib_error_t *
1818 stats_init (vlib_main_t * vm)
1820 stats_main_t *sm = &stats_main;
1821 api_main_t *am = &api_main;
1822 void *vlib_worker_thread_bootstrap_fn (void *arg);
1825 sm->vnet_main = vnet_get_main ();
1826 sm->interface_main = &vnet_get_main ()->interface_main;
/* Default polling period: 10 seconds. */
1828 sm->stats_poll_interval_in_seconds = 10;
/* Cache-line aligned lock protecting the registration structures. */
1829 sm->data_structure_lock =
1830 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
1831 CLIB_CACHE_LINE_BYTES);
1832 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Register handler/endian/print functions for each foreach_stats_msg. */
1835 vl_msg_api_set_handlers(VL_API_##N, #n, \
1836 vl_api_##n##_t_handler, \
1838 vl_api_##n##_t_endian, \
1839 vl_api_##n##_t_print, \
1840 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
1844 /* tell the msg infra not to free these messages... */
1845 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
1846 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
1847 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
1848 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
1849 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
1850 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
1853 * Set up the (msg_name, crc, message-id) table
1855 setup_message_id_table (am);
/* Run stats_init at vlib startup, and register the dedicated stats
 * pthread (stats_thread_fn) that does the periodic counter polling.
 * NOTE(review): the registration's remaining fields/closing brace fall in
 * elided lines of this listing. */
1860 VLIB_INIT_FUNCTION (stats_init);
1863 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
1865 .function = stats_thread_fn,
1868 .no_data_structure_clone = 1,
1874 * fd.io coding-style-patch-verification: ON
1877 * eval: (c-set-style "gnu")