2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
/* Single global instance of the stats-collector state. */
25 stats_main_t stats_main;
27 #include <vnet/ip/ip.h>
29 #include <vpp/api/vpe_msg_enum.h>
32 #define f64_print(a,b)
34 #define vl_typedefs /* define message structures */
35 #include <vpp/api/vpe_all_api_h.h>
38 #define vl_endianfun /* define message structures */
39 #include <vpp/api/vpe_all_api_h.h>
42 /* instantiate all the print functions we know about */
43 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
45 #include <vpp/api/vpe_all_api_h.h>
/* Dispatch table: each _(MSG_ENUM, handler_suffix) pair maps a stats API
 * message to its vl_api_<suffix>_t_handler function.  Expanded with a
 * locally-defined _() to register all handlers in one pass. */
48 #define foreach_stats_msg \
49 _(WANT_STATS, want_stats) \
50 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
51 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
52 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
53 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
54 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
55 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
56 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
57 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
58 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
59 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
60 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
61 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
62 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
63 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
64 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
65 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
66 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
67 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
68 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats) \
69 _(MAP_STATS_SEGMENT, map_stats_segment)
71 #define vl_msg_name_crc_list
72 #include <vpp/stats/stats.api.h>
73 #undef vl_msg_name_crc_list
/* Register the name+CRC of every stats API message in the API message-id
 * table so clients can resolve message ids at runtime. */
76 setup_message_id_table (api_main_t * am)
79 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
80 foreach_vl_msg_name_crc_stats;
84 /* These constants ensure msg sizes <= 1024, aka ring allocation */
85 #define SIMPLE_COUNTER_BATCH_SIZE 126
86 #define COMBINED_COUNTER_BATCH_SIZE 63
87 #define IP4_FIB_COUNTER_BATCH_SIZE 48
88 #define IP6_FIB_COUNTER_BATCH_SIZE 30
89 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
90 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
91 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
94 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* format() helper: render a combined (packets+bytes) interface-counter
 * message as "<ifname>.<type>.packets N" / "<ifname>.<type>.bytes N" lines. */
98 format_vnet_interface_combined_counters (u8 * s, va_list * args)
100 stats_main_t *sm = &stats_main;
101 vl_api_vnet_interface_combined_counters_t *mp =
102 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
105 u32 count, sw_if_index;
/* Message fields arrive in network byte order */
107 count = ntohl (mp->count);
108 sw_if_index = ntohl (mp->first_sw_if_index);
112 vp = (vlib_counter_t *) mp->data;
/* Map the counter type to a printable name; unknown types print "bogus" */
114 switch (mp->vnet_counter_type)
116 case VNET_INTERFACE_COUNTER_RX:
119 case VNET_INTERFACE_COUNTER_TX:
123 counter_name = "bogus";
126 for (i = 0; i < count; i++)
/* Counter data in the message body may be unaligned */
128 packets = clib_mem_unaligned (&vp->packets, u64);
129 packets = clib_net_to_host_u64 (packets);
130 bytes = clib_mem_unaligned (&vp->bytes, u64);
131 bytes = clib_net_to_host_u64 (bytes);
133 s = format (s, "%U.%s.packets %lld\n",
134 format_vnet_sw_if_index_name,
135 sm->vnet_main, sw_if_index, counter_name, packets);
136 s = format (s, "%U.%s.bytes %lld\n",
137 format_vnet_sw_if_index_name,
138 sm->vnet_main, sw_if_index, counter_name, bytes);
/* format() helper: render a simple (single u64) interface-counter message
 * as one "<ifname>.<counter-name> N" line per interface. */
145 format_vnet_interface_simple_counters (u8 * s, va_list * args)
147 stats_main_t *sm = &stats_main;
148 vl_api_vnet_interface_simple_counters_t *mp =
149 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
151 u32 count, sw_if_index;
152 count = ntohl (mp->count);
153 sw_if_index = ntohl (mp->first_sw_if_index);
155 vp = (u64 *) mp->data;
/* Translate the counter-type enum into a printable name */
158 switch (mp->vnet_counter_type)
160 case VNET_INTERFACE_COUNTER_DROP:
161 counter_name = "drop";
163 case VNET_INTERFACE_COUNTER_PUNT:
164 counter_name = "punt";
166 case VNET_INTERFACE_COUNTER_IP4:
167 counter_name = "ip4";
169 case VNET_INTERFACE_COUNTER_IP6:
170 counter_name = "ip6";
172 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
173 counter_name = "rx-no-buff";
175 case VNET_INTERFACE_COUNTER_RX_MISS:
176 counter_name = "rx-miss";
178 case VNET_INTERFACE_COUNTER_RX_ERROR:
179 counter_name = "rx-error (fifo-full)";
181 case VNET_INTERFACE_COUNTER_TX_ERROR:
182 counter_name = "tx-error (fifo-full)";
185 counter_name = "bogus";
/* One counter per successive sw_if_index, unaligned + network order */
188 for (i = 0; i < count; i++)
190 v = clib_mem_unaligned (vp, u64);
191 v = clib_net_to_host_u64 (v);
193 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
194 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the shared stats data-structure lock for the calling thread.
 * Spins on a test-and-set word; the early l->thread_index comparison makes
 * re-acquisition by the owning thread tolerable (recursion handling lives
 * on lines not visible here -- confirm against full source). */
202 dslock (stats_main_t * sm, int release_hint, int tag)
205 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock object not yet allocated: nothing to take */
207 if (PREDICT_FALSE (l == 0))
210 thread_index = vlib_get_thread_index ();
/* Already held by this very thread */
211 if (l->lock && l->thread_index == thread_index)
220 while (__sync_lock_test_and_set (&l->lock, 1))
223 l->thread_index = thread_index;
/* Public wrapper: take the stats lock, passing hint/tag through for
 * debugging/diagnostics. */
228 stats_dslock_with_hint (int hint, int tag)
230 stats_main_t *sm = &stats_main;
231 dslock (sm, hint, tag);
/* Release the stats data-structure lock; the caller must be the owner. */
235 dsunlock (stats_main_t * sm)
238 data_structure_lock_t *l = sm->data_structure_lock;
240 if (PREDICT_FALSE (l == 0))
243 thread_index = vlib_get_thread_index ();
/* Only the owning thread may unlock */
244 ASSERT (l->lock && l->thread_index == thread_index);
/* Make prior writes visible before the lock word is cleared */
250 CLIB_MEMORY_BARRIER ();
/* Public unlock wrapper; hint/tag accepted for symmetry with
 * stats_dslock_with_hint (presumably forwards to dsunlock -- the call is
 * on a line not visible here). */
256 stats_dsunlock (int hint, int tag)
258 stats_main_t *sm = &stats_main;
/* Look up client_index's registration for (reg, item).
 * Returns the vpe_client_registration_t when both the item and the client
 * are registered (NULL-return paths are on lines not visible here). */
262 static vpe_client_registration_t *
263 get_client_for_stat (u32 reg, u32 item, u32 client_index)
265 stats_main_t *sm = &stats_main;
266 vpe_client_stats_registration_t *registration;
269 /* Is there anything listening for item in that reg */
270 p = hash_get (sm->stats_registration_hash[reg], item);
275 /* If there is, is our client_index one of them */
276 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
277 p = hash_get (registration->client_hash, client_index);
282 return pool_elt_at_index (registration->clients, p[0]);
/* Register `client` as a listener for (reg, item), creating the per-item
 * registration record on first use.  Returns 1: at least one client is now
 * listening, so the stats poller should run. */
287 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
289 stats_main_t *sm = &stats_main;
290 vpe_client_stats_registration_t *registration;
291 vpe_client_registration_t *cr;
294 /* Is there anything listening for item in that reg */
295 p = hash_get (sm->stats_registration_hash[reg], item);
/* First listener for this item: create the registration record */
299 pool_get (sm->stats_registrations[reg], registration);
300 registration->item = item;
301 registration->client_hash = NULL;
302 registration->clients = NULL;
303 hash_set (sm->stats_registration_hash[reg], item,
304 registration - sm->stats_registrations[reg]);
308 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* Add the client unless it is already present in the client hash */
311 p = hash_get (registration->client_hash, client->client_index);
315 pool_get (registration->clients, cr);
316 cr->client_index = client->client_index;
317 cr->client_pid = client->client_pid;
318 hash_set (registration->client_hash, cr->client_index,
319 cr - registration->clients);
322 return 1; //At least one client is doing something ... poll
/* Remove client_index from the registration at reg_index for (reg, item);
 * when the last client goes, tear down the whole per-item registration. */
326 clear_one_client (u32 reg_index, u32 reg, u32 item, u32 client_index)
328 stats_main_t *sm = &stats_main;
329 vpe_client_stats_registration_t *registration;
330 vpe_client_registration_t *client;
333 registration = pool_elt_at_index (sm->stats_registrations[reg], reg_index);
334 p = hash_get (registration->client_hash, client_index);
/* Client found: drop it from both the hash and the pool */
338 client = pool_elt_at_index (registration->clients, p[0]);
339 hash_unset (registration->client_hash, client->client_index);
340 pool_put (registration->clients, client);
342 /* Now check if that was the last client for that item */
343 if (0 == pool_elts (registration->clients))
345 hash_unset (sm->stats_registration_hash[reg], item);
346 hash_free (registration->client_hash);
347 pool_free (registration->clients);
348 pool_put (sm->stats_registrations[reg], registration);
/* Unregister client_index from (reg, item).  The `elts` accumulation below
 * presumably yields the return value (non-zero while any registrations
 * remain, so the caller knows whether to keep polling) -- confirm against
 * full source. */
354 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
356 stats_main_t *sm = &stats_main;
360 /* Clear the client first */
361 /* Is there anything listening for item in that reg */
362 p = hash_get (sm->stats_registration_hash[reg], item);
367 /* If there is, is our client_index one of them */
368 clear_one_client (p[0], reg, item, client_index);
372 /* Now check if that was the last item in any of the listened to stats */
373 for (i = 0; i < STATS_REG_N_IDX; i++)
375 elts += pool_elts (sm->stats_registrations[i]);
/* Drop every registration held by client_index across all stat types
 * (used when a client disconnects).  As above, `elts` presumably becomes
 * the keep-polling return value -- confirm against full source. */
381 clear_client_for_all_stats (u32 client_index)
383 stats_main_t *sm = &stats_main;
384 u32 reg_index, item, reg;
/* Visit every (item -> registration) entry of every stat type */
388 vec_foreach_index(reg, sm->stats_registration_hash)
390 hash_foreach(item, reg_index, sm->stats_registration_hash[reg],
392 clear_one_client(reg_index, reg, item, client_index);
398 /* Now check if that was the last item in any of the listened to stats */
399 for (i = 0; i < STATS_REG_N_IDX; i++)
401 elts += pool_elts (sm->stats_registrations[i]);
/* API reaper: when a client dies, purge all of its stats registrations and
 * keep the poller enabled only if other listeners remain. */
406 static clib_error_t *
407 want_stats_reaper (u32 client_index)
409 stats_main_t *sm = &stats_main;
411 sm->enable_poller = clear_client_for_all_stats (client_index);
416 VL_MSG_API_REAPER_FUNCTION (want_stats_reaper);
420 * Return a copy of the clients list.
422 vpe_client_registration_t *
423 get_clients_for_stat (u32 reg, u32 item)
425 stats_main_t *sm = &stats_main;
426 vpe_client_registration_t *client, *clients = 0;
427 vpe_client_stats_registration_t *registration;
430 /* Is there anything listening for item in that reg */
431 p = hash_get (sm->stats_registration_hash[reg], item);
436 /* If there is, is our client_index one of them */
437 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* Copy each registered client by value into a fresh vector; the caller
 * owns (and must free) the returned vector */
439 vec_reset_length (clients);
442 pool_foreach (client, registration->clients,
444 vec_add1 (clients, *client);}
/* Free the per-stat-type index vectors created by init_client_reg, then
 * the containing registrations vector itself. */
452 clear_client_reg (u32 ** registrations)
454 /* When registrations[x] is a vector of pool indices
455 here is a good place to clean up the pools
457 #define stats_reg(n) vec_free(registrations[IDX_##n]);
458 #include <vpp/stats/stats.reg>
461 vec_free (registrations);
/* Allocate/clear one "interested index" vector per stat type in stats.reg.
 * Returns the (possibly reallocated) registrations vector. */
465 init_client_reg (u32 ** registrations)
469 Initialise the stats registrations for each
470 type of stat a client can register for as well as
471 a vector of "interested" indexes.
472 Initially this is a u32 of either sw_if_index or fib_index
473 but eventually this should migrate to a pool_index (u32)
474 with a type specific pool that can include more complex things
475 such as timing and structured events.
477 vec_validate (registrations, STATS_REG_N_IDX);
478 #define stats_reg(n) \
479 vec_reset_length(registrations[IDX_##n]);
480 #include <vpp/stats/stats.reg>
484 When registrations[x] is a vector of pool indices, here
485 is a good place to init the pools.
487 return registrations;
/* Register interest in "everything" by appending the ~0 wildcard index to
 * every stat type's vector. */
491 enable_all_client_reg (u32 ** registrations)
495 Enable all stats known by adding
496 ~0 to the index vector. Eventually this
497 should be deprecated.
499 #define stats_reg(n) \
500 vec_add1(registrations[IDX_##n], ~0);
501 #include <vpp/stats/stats.reg>
503 return registrations;
/* Batch all simple (single-u64) interface counters into
 * VNET_INTERFACE_SIMPLE_COUNTERS messages of at most
 * SIMPLE_COUNTER_BATCH_SIZE entries and enqueue them to the main-thread
 * shared-memory input queue. */
507 do_simple_interface_counters (stats_main_t * sm)
509 vl_api_vnet_interface_simple_counters_t *mp = 0;
510 vnet_interface_main_t *im = sm->interface_main;
511 api_main_t *am = sm->api_main;
512 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
513 svm_queue_t *q = shmem_hdr->vl_input_queue;
514 vlib_simple_counter_main_t *cm;
515 u32 items_this_message = 0;
520 * Prevent interface registration from expanding / moving the vectors...
521 * That tends never to happen, so we can hold this lock for a while.
523 vnet_interface_counter_lock (im);
525 vec_foreach (cm, im->sw_if_counters)
527 n_counts = vlib_simple_counter_n_counters (cm);
528 for (i = 0; i < n_counts; i++)
/* Start a new message whenever the previous batch has been sent */
532 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
535 mp = vl_msg_api_alloc_as_if_client
536 (sizeof (*mp) + items_this_message * sizeof (v));
537 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
538 mp->vnet_counter_type = cm - im->sw_if_counters;
539 mp->first_sw_if_index = htonl (i);
/* Append the next counter value (network byte order, unaligned store) */
541 vp = (u64 *) mp->data;
543 v = vlib_get_simple_counter (cm, i);
544 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
547 if (mp->count == items_this_message)
549 mp->count = htonl (items_this_message);
550 /* Send to the main thread... */
551 vl_msg_api_send_shmem (q, (u8 *) & mp);
557 vnet_interface_counter_unlock (im);
/* Enable or disable `client`'s registration for (stat, item).
 * Disable path: warn and bail when the client was never registered,
 * otherwise clear it.  Enable path: (re)register and refresh the poller
 * flag.  NOTE(review): on enable, rp presumably points at the scratch _rp
 * when no prior registration exists -- confirm against full source. */
561 handle_client_registration (vpe_client_registration_t * client, u32 stat,
562 u32 item, int enable_disable)
564 stats_main_t *sm = &stats_main;
565 vpe_client_registration_t *rp, _rp;
567 rp = get_client_for_stat (stat, item, client->client_index);
570 if (enable_disable == 0)
572 if (!rp) // No client to disable
574 clib_warning ("pid %d: already disabled for stats...",
579 clear_client_for_stat (stat, item, client->client_index);
586 rp->client_index = client->client_index;
587 rp->client_pid = client->client_pid;
588 sm->enable_poller = set_client_for_stat (stat, item, rp);
593 /**********************************
594 * ALL Interface Combined stats - to be deprecated
595 **********************************/
598 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/* Handler for WANT_INTERFACE_COMBINED_STATS (deprecated): registers the
 * client for ALL interfaces by routing through the per-interface machinery
 * with the ~0 wildcard, then sends a retval reply. */
601 vl_api_want_interface_combined_stats_t_handler
602 (vl_api_want_interface_combined_stats_t * mp)
604 stats_main_t *sm = &stats_main;
605 vpe_client_registration_t rp;
606 vl_api_want_interface_combined_stats_reply_t *rmp;
609 vl_api_registration_t *reg;
612 swif = ~0; //Using same mechanism as _per_interface_
613 rp.client_index = mp->client_index;
614 rp.client_pid = mp->pid;
616 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
/* Reply path; presumably a failed registration lookup rolls back the
 * stat registration below -- confirm against full source */
620 reg = vl_api_client_index_to_registration (mp->client_index);
624 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
629 rmp = vl_msg_api_alloc (sizeof (*rmp));
630 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
631 rmp->context = mp->context;
632 rmp->retval = retval;
634 vl_api_send_msg (reg, (u8 *) rmp);
/* Fan a VNET_INTERFACE_COMBINED_COUNTERS message out to every client
 * registered for the ~0 (all-interfaces) item.  Intermediate recipients
 * get the current buffer while a fresh copy is kept for the remaining
 * clients; the message is freed only when nobody consumed it.
 * NOTE(review): the mp/mp_copy hand-over happens on lines not visible
 * here -- confirm against full source. */
638 vl_api_vnet_interface_combined_counters_t_handler
639 (vl_api_vnet_interface_combined_counters_t * mp)
641 vpe_client_registration_t *clients, client;
642 stats_main_t *sm = &stats_main;
643 vl_api_registration_t *reg, *reg_prev = NULL;
644 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
/* Total wire size: header plus `count` packet/byte pairs */
648 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
651 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
652 ~0 /*flag for all */ );
654 for (i = 0; i < vec_len (clients); i++)
657 reg = vl_api_client_index_to_registration (client.client_index);
660 if (reg_prev && vl_api_can_send_msg (reg_prev))
662 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
663 clib_memcpy (mp_copy, mp, mp_size);
664 vl_api_send_msg (reg_prev, (u8 *) mp);
672 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
/* Flush to the final pending recipient, or free the unsent message */
675 if (reg_prev && vl_api_can_send_msg (reg_prev))
677 vl_api_send_msg (reg_prev, (u8 *) mp);
681 vl_msg_api_free (mp);
/* Batch all combined (packets+bytes) interface counters into
 * VNET_INTERFACE_COMBINED_COUNTERS messages of at most
 * COMBINED_COUNTER_BATCH_SIZE entries and enqueue them to the main-thread
 * shared-memory input queue. */
686 do_combined_interface_counters (stats_main_t * sm)
688 vl_api_vnet_interface_combined_counters_t *mp = 0;
689 vnet_interface_main_t *im = sm->interface_main;
690 api_main_t *am = sm->api_main;
691 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
692 svm_queue_t *q = shmem_hdr->vl_input_queue;
693 vlib_combined_counter_main_t *cm;
694 u32 items_this_message = 0;
695 vlib_counter_t v, *vp = 0;
/* Hold the interface-counter lock so the vectors cannot move under us */
698 vnet_interface_counter_lock (im);
700 vec_foreach (cm, im->combined_sw_if_counters)
702 n_counts = vlib_combined_counter_n_counters (cm);
703 for (i = 0; i < n_counts; i++)
/* Start a new message whenever the previous batch has been sent */
707 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
710 mp = vl_msg_api_alloc_as_if_client
711 (sizeof (*mp) + items_this_message * sizeof (v));
712 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
713 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
714 mp->first_sw_if_index = htonl (i);
/* Append packets/bytes (network byte order, unaligned stores) */
716 vp = (vlib_counter_t *) mp->data;
718 vlib_get_combined_counter (cm, i, &v);
719 clib_mem_unaligned (&vp->packets, u64)
720 = clib_host_to_net_u64 (v.packets);
721 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
724 if (mp->count == items_this_message)
726 mp->count = htonl (items_this_message);
727 /* Send to the main thread... */
728 vl_msg_api_send_shmem (q, (u8 *) & mp);
734 vnet_interface_counter_unlock (im);
737 /**********************************
738 * Per Interface Combined stats
739 **********************************/
741 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_COMBINED_STATS: validate every requested
 * sw_if_index, register (or unregister) the client per interface, then
 * send a retval reply. */
743 vl_api_want_per_interface_combined_stats_t_handler
744 (vl_api_want_per_interface_combined_stats_t * mp)
746 stats_main_t *sm = &stats_main;
747 vpe_client_registration_t rp;
748 vl_api_want_per_interface_combined_stats_reply_t *rmp;
749 vlib_combined_counter_main_t *cm;
752 vl_api_registration_t *reg;
753 u32 i, swif, num = 0;
755 num = ntohl (mp->num);
758 * Validate sw_if_indexes before registering
760 for (i = 0; i < num; i++)
762 swif = ntohl (mp->sw_ifs[i]);
765 * Check its a real sw_if_index that the client is allowed to see
769 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
771 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* Registration pass: all indices validated above */
777 for (i = 0; i < num; i++)
779 swif = ntohl (mp->sw_ifs[i]);
781 rp.client_index = mp->client_index;
782 rp.client_pid = mp->pid;
783 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
784 swif, ntohl (mp->enable_disable));
/* Reply path; presumably a failed registration lookup rolls back the
 * per-interface registrations below -- confirm against full source */
788 reg = vl_api_client_index_to_registration (mp->client_index);
791 for (i = 0; i < num; i++)
793 swif = ntohl (mp->sw_ifs[i]);
796 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
802 rmp = vl_msg_api_alloc (sizeof (*rmp));
803 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
804 rmp->context = mp->context;
805 rmp->retval = retval;
807 vl_api_send_msg (reg, (u8 *) rmp);
810 /* Per Interface Combined distribution to client */
/* Poller body: for every per-interface combined-counters registration,
 * send each registered client a VNET_PER_INTERFACE_COMBINED_COUNTERS
 * message carrying all eight rx/tx counter pairs for that interface. */
812 do_combined_per_interface_counters (stats_main_t * sm)
814 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
815 vnet_interface_main_t *im = sm->interface_main;
816 api_main_t *am = sm->api_main;
817 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
818 vl_api_registration_t *vl_reg;
819 vlib_combined_counter_main_t *cm;
820 vl_api_vnet_combined_counter_t *vp = 0;
823 vpe_client_stats_registration_t *reg;
824 vpe_client_registration_t *client;
825 u32 *sw_if_index = 0;
827 vnet_interface_counter_lock (im);
/* Snapshot the registrations into regs_tmp so the pool can be mutated
 * (client cleanup) while we iterate */
829 vec_reset_length (sm->regs_tmp);
833 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
834 ({ vec_add1 (sm->regs_tmp, reg); }));
837 for (i = 0; i < vec_len (sm->regs_tmp); i++)
839 reg = sm->regs_tmp[i];
/* ~0 wildcard item presumably takes the all-interfaces path below --
 * the guard is on a line not visible here; confirm */
842 vnet_interface_counter_unlock (im);
843 do_combined_interface_counters (sm);
844 vnet_interface_counter_lock (im);
/* Snapshot this registration's clients likewise */
847 vec_reset_length (sm->clients_tmp);
850 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
854 for (j = 0; j < vec_len (sm->clients_tmp); j++)
856 client = sm->clients_tmp[j];
858 vl_reg = vl_api_client_index_to_registration (client->client_index);
860 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
864 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
865 reg->item, client->client_index);
868 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
869 memset (mp, 0, sizeof (*mp));
872 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
875 * count will eventually be used to optimise the batching
876 * of per client messages for each stat. For now setting this to 1 then
877 * iterate. This will not affect API.
879 * FIXME instead of enqueueing here, this should be sent to a batch
880 * storer for per-client transmission. Each "mp" sent would be a single entry
881 * and if a client is listening to other sw_if_indexes for same, it would be
882 * appended to that *mp
886 * - capturing the timestamp of the counters "when VPP knew them" is important.
887 * Less so is that the timing of the delivery to the control plane be in the same
890 * i.e. As long as the control plane can delta messages from VPP and work out
891 * velocity etc based on the timestamp, it can do so in a more "batch mode".
893 * It would be beneficial to keep a "per-client" message queue, and then
894 * batch all the stat messages for a client into one message, with
895 * discrete timestamps.
897 * Given this particular API is for "per interface" one assumes that the scale
898 * is less than the ~0 case, which the prior API is suited for.
902 * 1 message per api call for now
904 mp->count = htonl (1);
/* NOTE(review): f64 time truncated into a u32 -- whole seconds only */
905 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
907 vp = (vl_api_vnet_combined_counter_t *) mp->data;
908 vp->sw_if_index = htonl (reg->item);
910 im = &vnet_get_main ()->interface_main;
913 cm = im->combined_sw_if_counters + X; \
914 vlib_get_combined_counter (cm, reg->item, &v); \
915 clib_mem_unaligned (&vp->x##_packets, u64) = \
916 clib_host_to_net_u64 (v.packets); \
917 clib_mem_unaligned (&vp->x##_bytes, u64) = \
918 clib_host_to_net_u64 (v.bytes);
921 _(VNET_INTERFACE_COUNTER_RX, rx);
922 _(VNET_INTERFACE_COUNTER_TX, tx);
923 _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
924 _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
925 _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
926 _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
927 _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
928 _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
932 vl_api_send_msg (vl_reg, (u8 *) mp);
936 vnet_interface_counter_unlock (im);
939 /**********************************
940 * Per Interface simple stats
941 **********************************/
943 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_SIMPLE_STATS: validate every requested
 * sw_if_index, register (or unregister) the client per interface, then
 * send a retval reply. */
945 vl_api_want_per_interface_simple_stats_t_handler
946 (vl_api_want_per_interface_simple_stats_t * mp)
948 stats_main_t *sm = &stats_main;
949 vpe_client_registration_t rp;
950 vl_api_want_per_interface_simple_stats_reply_t *rmp;
951 vlib_simple_counter_main_t *cm;
954 vl_api_registration_t *reg;
955 u32 i, swif, num = 0;
957 num = ntohl (mp->num);
/* Validation pass over all requested interfaces */
959 for (i = 0; i < num; i++)
961 swif = ntohl (mp->sw_ifs[i]);
963 /* Check its a real sw_if_index that the client is allowed to see */
966 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
968 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* Registration pass: all indices validated above */
974 for (i = 0; i < num; i++)
976 swif = ntohl (mp->sw_ifs[i]);
978 rp.client_index = mp->client_index;
979 rp.client_pid = mp->pid;
980 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
981 swif, ntohl (mp->enable_disable));
985 reg = vl_api_client_index_to_registration (mp->client_index);
987 /* Client may have disconnected abruptly, clean up */
990 for (i = 0; i < num; i++)
992 swif = ntohl (mp->sw_ifs[i]);
994 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
1002 rmp = vl_msg_api_alloc (sizeof (*rmp));
1003 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
1004 rmp->context = mp->context;
1005 rmp->retval = retval;
1007 vl_api_send_msg (reg, (u8 *) rmp);
1010 /* Per Interface Simple distribution to client */
/* Poller body: for every per-interface simple-counters registration, send
 * each registered client a VNET_PER_INTERFACE_SIMPLE_COUNTERS message
 * carrying the drop/punt/ip4/ip6/rx-no-buf/rx-miss/rx-error/tx-error/mpls
 * counters for that interface. */
1012 do_simple_per_interface_counters (stats_main_t * sm)
1014 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
1015 vnet_interface_main_t *im = sm->interface_main;
1016 api_main_t *am = sm->api_main;
1017 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1018 vl_api_registration_t *vl_reg;
1019 vlib_simple_counter_main_t *cm;
1021 vpe_client_stats_registration_t *reg;
1022 vpe_client_registration_t *client;
1023 u32 timestamp, count;
1024 vl_api_vnet_simple_counter_t *vp = 0;
1027 vnet_interface_counter_lock (im);
/* Snapshot the registrations into regs_tmp so the pool can be mutated
 * (client cleanup) while we iterate */
1029 vec_reset_length (sm->regs_tmp);
1033 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
1034 ({ vec_add1 (sm->regs_tmp, reg); }));
1037 for (i = 0; i < vec_len (sm->regs_tmp); i++)
1039 reg = sm->regs_tmp[i];
/* ~0 wildcard item: fall back to the all-interfaces batch sender */
1040 if (reg->item == ~0)
1042 vnet_interface_counter_unlock (im);
1043 do_simple_interface_counters (sm);
1044 vnet_interface_counter_lock (im);
/* Snapshot this registration's clients likewise */
1047 vec_reset_length (sm->clients_tmp);
1050 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
1054 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1056 client = sm->clients_tmp[j];
1057 vl_reg = vl_api_client_index_to_registration (client->client_index);
1059 /* Client may have disconnected abrubtly, clean up */
1063 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1064 reg->item, client->client_index);
1068 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1069 memset (mp, 0, sizeof (*mp));
1070 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1073 * count will eventually be used to optimise the batching
1074 * of per client messages for each stat. For now setting this to 1 then
1075 * iterate. This will not affect API.
1077 * FIXME instead of enqueueing here, this should be sent to a batch
1078 * storer for per-client transmission. Each "mp" sent would be a single entry
1079 * and if a client is listening to other sw_if_indexes for same, it would be
1080 * appended to that *mp
1084 * - capturing the timestamp of the counters "when VPP knew them" is important.
1085 * Less so is that the timing of the delivery to the control plane be in the same
1088 * i.e. As long as the control plane can delta messages from VPP and work out
1089 * velocity etc based on the timestamp, it can do so in a more "batch mode".
1091 * It would be beneficial to keep a "per-client" message queue, and then
1092 * batch all the stat messages for a client into one message, with
1093 * discrete timestamps.
1095 * Given this particular API is for "per interface" one assumes that the scale
1096 * is less than the ~0 case, which the prior API is suited for.
1100 * 1 message per api call for now
1102 mp->count = htonl (1);
/* NOTE(review): f64 time truncated into a u32 -- whole seconds only */
1103 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1104 vp = (vl_api_vnet_simple_counter_t *) mp->data;
1106 vp->sw_if_index = htonl (reg->item);
1108 // VNET_INTERFACE_COUNTER_DROP
1109 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1110 v = vlib_get_simple_counter (cm, reg->item);
1111 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1113 // VNET_INTERFACE_COUNTER_PUNT
1114 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1115 v = vlib_get_simple_counter (cm, reg->item);
1116 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1118 // VNET_INTERFACE_COUNTER_IP4
1119 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1120 v = vlib_get_simple_counter (cm, reg->item);
1121 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1123 //VNET_INTERFACE_COUNTER_IP6
1124 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1125 v = vlib_get_simple_counter (cm, reg->item);
1126 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1128 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1129 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1130 v = vlib_get_simple_counter (cm, reg->item);
1131 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1132 clib_host_to_net_u64 (v);
1134 //VNET_INTERFACE_COUNTER_RX_MISS
1135 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1136 v = vlib_get_simple_counter (cm, reg->item);
1137 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1139 //VNET_INTERFACE_COUNTER_RX_ERROR
1140 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1141 v = vlib_get_simple_counter (cm, reg->item);
1142 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1144 //VNET_INTERFACE_COUNTER_TX_ERROR
1145 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1146 v = vlib_get_simple_counter (cm, reg->item);
1147 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1149 //VNET_INTERFACE_COUNTER_MPLS
1150 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1151 v = vlib_get_simple_counter (cm, reg->item);
1152 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1154 vl_api_send_msg (vl_reg, (u8 *) mp);
1158 vnet_interface_counter_unlock (im);
1161 /**********************************
1163 **********************************/
/* Sleep for sec seconds + nsec nanoseconds.  The `rem` remainder argument
 * presumably feeds an EINTR retry loop in the full source -- confirm. */
1166 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1168 struct timespec _req, *req = &_req;
1169 struct timespec _rem, *rem = &_rem;
1172 req->tv_nsec = nsec;
1175 if (nanosleep (req, rem) == 0)
/* Interrupted or failed: report via clib */
1180 clib_unix_warning ("nanosleep");
/* Walk context: accumulates per-adjacency IPv4 neighbour counters for a
 * single sw_if_index between suspend points. */
1186 * @brief The context passed when collecting adjacency counters
1188 typedef struct ip4_nbr_stats_ctx_t_
1191 * The SW IF index all these adjs belong to
1196 * A vector of ip4 nbr counters
1198 vl_api_ip4_nbr_counter_t *counters;
1199 } ip4_nbr_stats_ctx_t;
/* adj_nbr_walk callback: snapshot the combined counter for adjacency `ai`
 * and append it (host->net byte order) to ctx->counters; adjacencies with
 * zero packets are skipped.  Always continues the walk. */
1201 static adj_walk_rc_t
1202 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1204 vl_api_ip4_nbr_counter_t *vl_counter;
1205 vlib_counter_t adj_counter;
1206 ip4_nbr_stats_ctx_t *ctx;
1207 ip_adjacency_t *adj;
1210 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Only ship adjacencies that have actually seen traffic */
1212 if (0 != adj_counter.packets)
1214 vec_add2 (ctx->counters, vl_counter, 1);
1217 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1218 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
/* IPv4 next-hop address is already in network byte order */
1219 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1220 vl_counter->link_type = adj->ia_link;
1222 return (ADJ_WALK_RC_CONTINUE);
/* Local min helper; note arguments are evaluated more than once -- fine
 * for the simple operands used below. */
1225 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain ctx->counters into VNET_IP4_NBR_COUNTERS messages of at most
 * IP4_FIB_COUNTER_BATCH_SIZE entries, taken from the back of the vector,
 * under the stats data-structure lock.  If the shared-memory queue is
 * full, pause briefly so the main thread can drain it. */
1228 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1230 api_main_t *am = sm->api_main;
1231 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1232 svm_queue_t *q = shmem_hdr->vl_input_queue;
1233 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1237 * If the walk context has counters, which may be left over from the last
1238 * suspend, then we continue from there.
1240 while (0 != vec_len (ctx->counters))
1242 u32 n_items = MIN (vec_len (ctx->counters),
1243 IP4_FIB_COUNTER_BATCH_SIZE);
1246 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1248 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1251 (vl_api_ip4_nbr_counter_t)));
1252 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1253 mp->count = ntohl (n_items);
1254 mp->sw_if_index = ntohl (ctx->sw_if_index);
1259 * copy the counters from the back of the context, then we can easily
1260 * 'erase' them by resetting the vector length.
1261 * The order we push the stats to the caller is not important.
1264 &ctx->counters[vec_len (ctx->counters) - n_items],
1265 n_items * sizeof (*ctx->counters));
1267 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Record queue occupancy before enqueueing; pause afterwards if full */
1273 pause = svm_queue_is_full (q);
1275 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1276 svm_queue_unlock (q);
1280 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1281 STATS_RELEASE_DELAY_NS);
/* For every sw interface, walk its IPv4 neighbour adjacencies under the
 * stats lock, collect non-zero counters into a walk context, then ship
 * them to registered clients via ip4_nbr_ship(). */
1286 do_ip4_nbr_counters (stats_main_t * sm)
1288 vnet_main_t *vnm = vnet_get_main ();
1289 vnet_interface_main_t *im = &vnm->interface_main;
1290 vnet_sw_interface_t *si;
1292 ip4_nbr_stats_ctx_t ctx = {
1298 pool_foreach (si, im->sw_interfaces,
1301 * update the interface we are now concerned with
1303 ctx.sw_if_index = si->sw_if_index;
1306 * we are about to walk another interface, so we shouldn't have any pending
1309 ASSERT(ctx.counters == NULL);
1312 * visit each neighbour adjacency on the interface and collect
1313 * its current stats.
1314 * Because we hold the lock the walk is synchronous, so safe to routing
1315 * updates. It's limited in work by the number of adjacenies on an
1316 * interface, which is typically not huge.
1318 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1319 adj_nbr_walk (si->sw_if_index,
1326 * if this interface has some adjacencies with counters then ship them,
1327 * else continue to the next interface.
1329 if (NULL != ctx.counters)
1331 ip4_nbr_ship(sm, &ctx);
/*
 * Walk context for IP6 neighbour counter collection: the sw_if_index
 * all the adjacencies belong to, plus the vector of counters gathered
 * so far (shipped and reset by ip6_nbr_ship()).
 */
1338 * @brief The context passed when collecting adjacency counters
1340 typedef struct ip6_nbr_stats_ctx_t_
1343 * The SW IF index all these adjs belong to
1348 * A vector of ip6 nbr counters
1350 vl_api_ip6_nbr_counter_t *counters;
1351 } ip6_nbr_stats_ctx_t;
/*
 * adj_nbr_walk callback: read the combined counter for adjacency 'ai'
 * and, if it has seen at least one packet, append a record to the walk
 * context.  Packet/byte counts are converted to network byte order;
 * the ip6 next-hop is copied as two u64 words.
 * Always returns ADJ_WALK_RC_CONTINUE so the whole walk completes.
 */
1353 static adj_walk_rc_t
1354 ip6_nbr_stats_cb (adj_index_t ai,
1357 vl_api_ip6_nbr_counter_t *vl_counter;
1358 vlib_counter_t adj_counter;
1359 ip6_nbr_stats_ctx_t *ctx;
1360 ip_adjacency_t *adj;
1363 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* only adjacencies with traffic are reported */
1365 if (0 != adj_counter.packets)
1367 vec_add2(ctx->counters, vl_counter, 1);
1370 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1371 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1372 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1373 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1374 vl_counter->link_type = adj->ia_link;
1376 return (ADJ_WALK_RC_CONTINUE);
/* Classic MIN macro: each argument may be evaluated twice, so callers
 * must not pass expressions with side effects. */
1379 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * Ship the collected IP6 neighbour counters to the main thread in
 * batches of at most IP6_FIB_COUNTER_BATCH_SIZE.  Counters are taken
 * from the back of the context vector so they can be 'erased' cheaply
 * by shrinking the vector length; leftover counters survive a suspend
 * and are resumed on the next call.
 */
1382 ip6_nbr_ship (stats_main_t * sm,
1383 ip6_nbr_stats_ctx_t *ctx)
1385 api_main_t *am = sm->api_main;
1386 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1387 svm_queue_t *q = shmem_hdr->vl_input_queue;
1388 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1392 * If the walk context has counters, which may be left over from the last
1393 * suspend, then we continue from there.
1395 while (0 != vec_len(ctx->counters))
1397 u32 n_items = MIN (vec_len (ctx->counters),
1398 IP6_FIB_COUNTER_BATCH_SIZE);
1401 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1403 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1406 (vl_api_ip6_nbr_counter_t)));
1407 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1408 mp->count = ntohl (n_items);
1409 mp->sw_if_index = ntohl (ctx->sw_if_index);
1414 * copy the counters from the back of the context, then we can easily
1415 * 'erase' them by resetting the vector length.
1416 * The order we push the stats to the caller is not important.
1419 &ctx->counters[vec_len (ctx->counters) - n_items],
1420 n_items * sizeof (*ctx->counters));
1422 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* if the consumer queue is full, pause after sending so the main
 * thread can drain it */
1428 pause = svm_queue_is_full (q);
1430 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1431 svm_queue_unlock (q);
1435 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1436 STATS_RELEASE_DELAY_NS);
/*
 * IP6 twin of do_ip4_nbr_counters(): walk every sw interface's
 * neighbour adjacencies under the data-structure lock and ship any
 * collected counters via ip6_nbr_ship().
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
1441 do_ip6_nbr_counters (stats_main_t * sm)
1443 vnet_main_t *vnm = vnet_get_main ();
1444 vnet_interface_main_t *im = &vnm->interface_main;
1445 vnet_sw_interface_t *si;
1447 ip6_nbr_stats_ctx_t ctx = {
1453 pool_foreach (si, im->sw_interfaces,
1456 * update the interface we are now concerned with
1458 ctx.sw_if_index = si->sw_if_index;
1461 * we are about to walk another interface, so we shouldn't have any pending
1464 ASSERT(ctx.counters == NULL);
1467 * visit each neighbour adjacency on the interface and collect
1468 * its current stats.
1469 * Because we hold the lock the walk is synchronous, so safe to routing
1470 * updates. It's limited in work by the number of adjacenies on an
1471 * interface, which is typically not huge.
1473 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1474 adj_nbr_walk (si->sw_if_index,
1481 * if this interface has some adjacencies with counters then ship them,
1482 * else continue to the next interface.
1484 if (NULL != ctx.counters)
1486 ip6_nbr_ship(sm, &ctx);
/*
 * Collect and ship per-route IP4 FIB counters.  Snapshots the fib pool,
 * then for each fib: gathers routes from the per-prefix-length hash
 * tables under the data-structure lock, looks up each route's
 * load-balance counter, and batches reportable routes into shared-memory
 * API messages of IP4_FIB_COUNTER_BATCH_SIZE entries.  If the control
 * plane signals via release_hint, the scan restarts at the current fib
 * after a short delay.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
1493 do_ip4_fib_counters (stats_main_t * sm)
1495 ip4_main_t *im4 = &ip4_main;
1496 api_main_t *am = sm->api_main;
1497 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1498 svm_queue_t *q = shmem_hdr->vl_input_queue;
1502 do_ip46_fibs_t *do_fibs;
1503 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1504 u32 items_this_message;
1505 vl_api_ip4_fib_counter_t *ctrp = 0;
1506 u32 start_at_fib_index = 0;
1509 do_fibs = &sm->do_ip46_fibs;
/* snapshot the fib pool so we can iterate without holding it */
1512 vec_reset_length (do_fibs->fibs);
1514 pool_foreach (fib, im4->fibs,
1515 ({vec_add1(do_fibs->fibs,fib);}));
1519 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1521 fib = do_fibs->fibs[j];
1522 /* We may have bailed out due to control-plane activity */
1523 while ((fib - im4->fibs) < start_at_fib_index)
1526 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1530 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1531 mp = vl_msg_api_alloc_as_if_client
1533 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1534 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1536 mp->vrf_id = ntohl (fib->ft_table_id);
1537 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1541 /* happens if the last FIB was empty... */
1542 ASSERT (mp->count == 0);
1543 mp->vrf_id = ntohl (fib->ft_table_id);
1546 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1548 vec_reset_length (do_fibs->ip4routes);
1549 vec_reset_length (do_fibs->results);
/* one hash table per prefix length; i doubles as address_length */
1551 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1553 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1557 vec_reset_length (do_fibs->pvec);
1559 x.address_length = i;
1561 hash_foreach_pair (p, hash, (
1563 vec_add1 (do_fibs->pvec, p);}
1565 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1567 p = do_fibs->pvec[k];
1568 x.address.data_u32 = p->key;
1569 x.index = p->value[0];
1571 vec_add1 (do_fibs->ip4routes, x);
/* yield the lock if the main thread wants it, then restart this fib */
1572 if (sm->data_structure_lock->release_hint)
1574 start_at_fib_index = fib - im4->fibs;
1576 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1577 STATS_RELEASE_DELAY_NS);
1579 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1585 vec_foreach (r, do_fibs->ip4routes)
1588 const dpo_id_t *dpo_id;
1591 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1592 index = (u32) dpo_id->dpoi_index;
1594 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1597 * If it has actually
1598 * seen at least one packet, send it.
1603 /* already in net byte order */
1604 ctrp->address = r->address.as_u32;
1605 ctrp->address_length = r->address_length;
1606 ctrp->packets = clib_host_to_net_u64 (c.packets);
1607 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: send it and start a fresh message for the same fib */
1611 if (mp->count == items_this_message)
1613 mp->count = htonl (items_this_message);
1615 * If the main thread's input queue is stuffed,
1616 * drop the data structure lock (which the main thread
1617 * may want), and take a pause.
1620 if (svm_queue_is_full (q))
1623 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1624 svm_queue_unlock (q);
1626 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1627 STATS_RELEASE_DELAY_NS);
1630 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1631 svm_queue_unlock (q);
1633 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1634 mp = vl_msg_api_alloc_as_if_client
1636 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1637 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1639 mp->vrf_id = ntohl (fib->ft_table_id);
1640 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1642 } /* for each (mp or single) adj */
1643 if (sm->data_structure_lock->release_hint)
1645 start_at_fib_index = fib - im4->fibs;
1647 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1649 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1652 } /* vec_foreach (routes) */
1656 /* Flush any data from this fib */
1659 mp->count = htonl (mp->count);
1660 vl_msg_api_send_shmem (q, (u8 *) & mp);
1665 /* If e.g. the last FIB had no reportable routes, free the buffer */
1667 vl_msg_api_free (mp);
/*
 * mfib_table_walk callback: record the entry's prefix into
 * sm->do_ip46_fibs.mroutes so the counters can be looked up later,
 * outside the table walk (the walker suspends during collection).
 */
1671 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1673 stats_main_t *sm = ctx;
1674 do_ip46_fibs_t *do_fibs;
1675 mfib_entry_t *entry;
1677 do_fibs = &sm->do_ip46_fibs;
1678 entry = mfib_entry_get (fei);
1680 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/*
 * Collect and ship IP4 multicast FIB counters.  Snapshots the mfib
 * pool, walks each table under the lock to gather prefixes, then
 * re-looks-up each prefix (we may have suspended in between) and ships
 * replicate counters in batches of IP4_MFIB_COUNTER_BATCH_SIZE.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
1686 do_ip4_mfib_counters (stats_main_t * sm)
1688 ip4_main_t *im4 = &ip4_main;
1689 api_main_t *am = sm->api_main;
1690 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1691 svm_queue_t *q = shmem_hdr->vl_input_queue;
1694 do_ip46_fibs_t *do_fibs;
1695 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1696 u32 items_this_message;
1697 vl_api_ip4_mfib_counter_t *ctrp = 0;
1698 u32 start_at_mfib_index = 0;
1701 do_fibs = &sm->do_ip46_fibs;
1703 vec_reset_length (do_fibs->mfibs);
1705 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1708 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1710 mfib = do_fibs->mfibs[j];
1711 /* We may have bailed out due to control-plane activity */
1712 while ((mfib - im4->mfibs) < start_at_mfib_index)
1717 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1718 mp = vl_msg_api_alloc_as_if_client
1720 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1721 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1723 mp->vrf_id = ntohl (mfib->mft_table_id);
1724 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1728 /* happens if the last MFIB was empty... */
1729 ASSERT (mp->count == 0);
1730 mp->vrf_id = ntohl (mfib->mft_table_id);
1733 vec_reset_length (do_fibs->mroutes);
1736 * walk the table with table updates blocked
1738 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1740 mfib_table_walk (mfib->mft_index,
1741 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1744 vec_foreach (pfx, do_fibs->mroutes)
1746 const dpo_id_t *dpo_id;
1747 fib_node_index_t mfei;
1752 * re-lookup the entry, since we suspend during the collection
1754 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* entry may have been deleted while we were suspended */
1756 if (FIB_NODE_INDEX_INVALID == mfei)
1759 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1760 index = (u32) dpo_id->dpoi_index;
1762 vlib_get_combined_counter (&replicate_main.repm_counters,
1763 dpo_id->dpoi_index, &c);
1765 * If it has seen at least one packet, send it.
1769 /* already in net byte order */
1770 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1771 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1772 ctrp->group_length = pfx->fp_len;
1773 ctrp->packets = clib_host_to_net_u64 (c.packets);
1774 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: wait for queue space, send, and start a new message */
1778 if (mp->count == items_this_message)
1780 mp->count = htonl (items_this_message);
1782 * If the main thread's input queue is stuffed,
1783 * drop the data structure lock (which the main thread
1784 * may want), and take a pause.
1788 while (svm_queue_is_full (q))
1790 svm_queue_unlock (q);
1791 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1792 STATS_RELEASE_DELAY_NS);
1795 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1796 svm_queue_unlock (q);
1798 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1799 mp = vl_msg_api_alloc_as_if_client
1801 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1802 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1804 mp->vrf_id = ntohl (mfib->mft_table_id);
1805 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1810 /* Flush any data from this mfib */
1813 mp->count = htonl (mp->count);
1814 vl_msg_api_send_shmem (q, (u8 *) & mp);
1819 /* If e.g. the last FIB had no reportable routes, free the buffer */
1821 vl_msg_api_free (mp);
/*
 * IP6 twin of do_ip4_mfib_counters(): walk each ip6 mfib under the
 * lock, then re-lookup each prefix and ship replicate counters in
 * batches of IP6_MFIB_COUNTER_BATCH_SIZE.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
1825 do_ip6_mfib_counters (stats_main_t * sm)
1827 ip6_main_t *im6 = &ip6_main;
1828 api_main_t *am = sm->api_main;
1829 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1830 svm_queue_t *q = shmem_hdr->vl_input_queue;
1833 do_ip46_fibs_t *do_fibs;
1834 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1835 u32 items_this_message;
1836 vl_api_ip6_mfib_counter_t *ctrp = 0;
1837 u32 start_at_mfib_index = 0;
1840 do_fibs = &sm->do_ip46_fibs;
1842 vec_reset_length (do_fibs->mfibs);
1844 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1847 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1849 mfib = do_fibs->mfibs[j];
1850 /* We may have bailed out due to control-plane activity */
1851 while ((mfib - im6->mfibs) < start_at_mfib_index)
1856 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1857 mp = vl_msg_api_alloc_as_if_client
1859 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1860 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1862 mp->vrf_id = ntohl (mfib->mft_table_id);
1863 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1867 /* happens if the last MFIB was empty... */
1868 ASSERT (mp->count == 0);
1869 mp->vrf_id = ntohl (mfib->mft_table_id);
1872 vec_reset_length (do_fibs->mroutes);
1875 * walk the table with table updates blocked
1877 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1879 mfib_table_walk (mfib->mft_index,
1880 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1883 vec_foreach (pfx, do_fibs->mroutes)
1885 const dpo_id_t *dpo_id;
1886 fib_node_index_t mfei;
1891 * re-lookup the entry, since we suspend during the collection
1893 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* entry may have been deleted while we were suspended */
1895 if (FIB_NODE_INDEX_INVALID == mfei)
1898 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1899 index = (u32) dpo_id->dpoi_index;
1901 vlib_get_combined_counter (&replicate_main.repm_counters,
1902 dpo_id->dpoi_index, &c);
1904 * If it has seen at least one packet, send it.
1908 /* already in net byte order */
1909 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1910 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1911 ctrp->group_length = pfx->fp_len;
1912 ctrp->packets = clib_host_to_net_u64 (c.packets);
1913 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: wait for queue space, send, and start a new message */
1917 if (mp->count == items_this_message)
1919 mp->count = htonl (items_this_message);
1921 * If the main thread's input queue is stuffed,
1922 * drop the data structure lock (which the main thread
1923 * may want), and take a pause.
1927 while (svm_queue_is_full (q))
1929 svm_queue_unlock (q);
1930 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1931 STATS_RELEASE_DELAY_NS);
1934 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1935 svm_queue_unlock (q);
1937 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1938 mp = vl_msg_api_alloc_as_if_client
1940 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1941 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1943 mp->vrf_id = ntohl (mfib->mft_table_id);
1944 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1949 /* Flush any data from this mfib */
1952 mp->count = htonl (mp->count);
1953 vl_msg_api_send_shmem (q, (u8 *) & mp);
1958 /* If e.g. the last FIB had no reportable routes, free the buffer */
1960 vl_msg_api_free (mp);
/*
 * Argument block for add_routes_in_fib(): the route vector to append
 * to, plus (per the callback's use) the target fib index and the
 * stats_main back-pointer.  NOTE(review): the struct's opening lines
 * are missing from this chunk.
 */
1966 ip6_route_t **routep;
1968 } add_routes_in_fib_arg_t;
/*
 * Bihash walk callback for the ip6 forwarding table: decode each kv
 * pair belonging to the target fib into an ip6_route_t and append it
 * to *ap->routep.  If the control plane wants the data-structure lock,
 * longjmp back to do_ip6_fib_counters() to abandon the walk.
 */
1971 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1973 add_routes_in_fib_arg_t *ap = arg;
1974 stats_main_t *sm = ap->sm;
/* non-local exit: the setjmp target is in do_ip6_fib_counters() */
1976 if (sm->data_structure_lock->release_hint)
1977 clib_longjmp (&sm->jmp_buf, 1);
/* key[2] upper 32 bits hold the fib index, low byte the prefix length */
1979 if (kvp->key[2] >> 32 == ap->fib_index)
1981 ip6_address_t *addr;
1983 addr = (ip6_address_t *) kvp;
1984 vec_add2 (*ap->routep, r, 1);
1985 r->address = addr[0];
1986 r->address_length = kvp->key[2] & 0xFF;
1987 r->index = kvp->value;
/*
 * Collect and ship per-route IP6 FIB counters.  Routes are gathered by
 * walking the forwarding-table bihash (add_routes_in_fib) under a
 * setjmp guard so control-plane pressure can abort the walk; counters
 * are then batched into shared-memory API messages of
 * IP6_FIB_COUNTER_BATCH_SIZE entries.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
1992 do_ip6_fib_counters (stats_main_t * sm)
1994 ip6_main_t *im6 = &ip6_main;
1995 api_main_t *am = sm->api_main;
1996 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1997 svm_queue_t *q = shmem_hdr->vl_input_queue;
2000 do_ip46_fibs_t *do_fibs;
2001 vl_api_vnet_ip6_fib_counters_t *mp = 0;
2002 u32 items_this_message;
2003 vl_api_ip6_fib_counter_t *ctrp = 0;
2004 u32 start_at_fib_index = 0;
2005 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
2006 add_routes_in_fib_arg_t _a, *a = &_a;
2009 do_fibs = &sm->do_ip46_fibs;
2011 vec_reset_length (do_fibs->fibs);
2013 pool_foreach (fib, im6->fibs,
2014 ({vec_add1(do_fibs->fibs,fib);}));
2018 for (i = 0; i < vec_len (do_fibs->fibs); i++)
2020 fib = do_fibs->fibs[i];
2021 /* We may have bailed out due to control-plane activity */
2022 while ((fib - im6->fibs) < start_at_fib_index)
2027 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2028 mp = vl_msg_api_alloc_as_if_client
2030 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2031 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2033 mp->vrf_id = ntohl (fib->ft_table_id);
2034 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2037 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2039 vec_reset_length (do_fibs->ip6routes);
2040 vec_reset_length (do_fibs->results);
2042 a->fib_index = fib - im6->fibs;
2043 a->routep = &do_fibs->ip6routes;
/* setjmp returns non-zero when add_routes_in_fib longjmps out */
2046 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
2048 start_at_fib_index = fib - im6->fibs;
2049 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
2054 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2055 STATS_RELEASE_DELAY_NS);
2057 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2061 vec_foreach (r, do_fibs->ip6routes)
2065 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2068 * If it has actually
2069 * seen at least one packet, send it.
2073 /* already in net byte order */
2074 ctrp->address[0] = r->address.as_u64[0];
2075 ctrp->address[1] = r->address.as_u64[1];
2076 ctrp->address_length = (u8) r->address_length;
2077 ctrp->packets = clib_host_to_net_u64 (c.packets);
2078 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: send it and start a fresh message for the same fib */
2082 if (mp->count == items_this_message)
2084 mp->count = htonl (items_this_message);
2086 * If the main thread's input queue is stuffed,
2087 * drop the data structure lock (which the main thread
2088 * may want), and take a pause.
2091 if (svm_queue_is_full (q))
2094 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2095 svm_queue_unlock (q);
2097 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2098 STATS_RELEASE_DELAY_NS);
2101 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2102 svm_queue_unlock (q);
2104 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2105 mp = vl_msg_api_alloc_as_if_client
2107 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2108 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2110 mp->vrf_id = ntohl (fib->ft_table_id);
2111 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2115 if (sm->data_structure_lock->release_hint)
2117 start_at_fib_index = fib - im6->fibs;
2119 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2121 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2124 } /* vec_foreach (routes) */
2128 /* Flush any data from this fib */
2131 mp->count = htonl (mp->count);
2132 vl_msg_api_send_shmem (q, (u8 *) & mp);
2137 /* If e.g. the last FIB had no reportable routes, free the buffer */
2139 vl_msg_api_free (mp);
/*
 * Per-udp-encap-object stat record, and the walk context that
 * accumulates a vector of them for shipping by udp_encap_ship().
 * NOTE(review): the udp_encap_stat_t members are missing from this
 * chunk; per its use below it holds an id plus a two-element stats
 * array — confirm against the full source.
 */
2142 typedef struct udp_encap_stat_t_
2148 typedef struct udp_encap_stats_walk_t_
2150 udp_encap_stat_t *stats;
2151 } udp_encap_stats_walk_t;
/*
 * udp_encap_walk callback: snapshot the packet/byte stats of the
 * udp-encap object at index 'uei' into the walk context.
 */
2154 udp_encap_stats_walk_cb (index_t uei, void *arg)
2156 udp_encap_stats_walk_t *ctx = arg;
2157 udp_encap_stat_t *stat;
2160 ue = udp_encap_get (uei);
2161 vec_add2 (ctx->stats, stat, 1);
2164 udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
/*
 * Ship collected udp-encap counters to the main thread in batches of
 * at most UDP_ENCAP_COUNTER_BATCH_SIZE, taking stats from the back of
 * the context vector (same suspend/resume scheme as ip4/ip6_nbr_ship).
 */
2170 udp_encap_ship (udp_encap_stats_walk_t * ctx)
2172 vl_api_vnet_udp_encap_counters_t *mp;
2173 vl_shmem_hdr_t *shmem_hdr;
2181 shmem_hdr = am->shmem_hdr;
2182 q = shmem_hdr->vl_input_queue;
2185 * If the walk context has counters, which may be left over from the last
2186 * suspend, then we continue from there.
2188 while (0 != vec_len (ctx->stats))
2190 u32 n_items = MIN (vec_len (ctx->stats),
2191 UDP_ENCAP_COUNTER_BATCH_SIZE);
2194 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2196 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2199 (vl_api_udp_encap_counter_t)));
2200 mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2201 mp->count = ntohl (n_items);
2204 * copy the counters from the back of the context, then we can easily
2205 * 'erase' them by resetting the vector length.
2206 * The order we push the stats to the caller is not important.
2209 &ctx->stats[vec_len (ctx->stats) - n_items],
2210 n_items * sizeof (*ctx->stats));
2212 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
/* pause after sending if the consumer queue is full */
2218 pause = svm_queue_is_full (q);
2220 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2221 svm_queue_unlock (q);
2225 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2226 STATS_RELEASE_DELAY_NS);
/*
 * Collect all udp-encap counters under the data-structure lock, then
 * ship them to registered clients.
 */
2231 do_udp_encap_counters (stats_main_t * sm)
2233 udp_encap_stat_t *stat;
2235 udp_encap_stats_walk_t ctx = {
2239 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2240 udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
2243 udp_encap_ship (&ctx);
/*
 * Set the stats poller interval in seconds.  A zero delay is rejected
 * (it would spin the poller); returns VNET_API_ERROR_INVALID_ARGUMENT
 * in that case.
 */
2247 stats_set_poller_delay (u32 poller_delay_sec)
2249 stats_main_t *sm = &stats_main;
2250 if (!poller_delay_sec)
2252 return VNET_API_ERROR_INVALID_ARGUMENT;
2256 sm->stats_poll_interval_in_seconds = poller_delay_sec;
/*
 * Startup-config handler for "stats { interval <seconds> }": forwards
 * the value to stats_set_poller_delay() and turns any failure (or
 * unknown token) into a clib error.
 */
2261 static clib_error_t *
2262 stats_config (vlib_main_t * vm, unformat_input_t * input)
2266 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2268 if (unformat (input, "interval %u", &sec))
2270 int rv = stats_set_poller_delay (sec);
2273 return clib_error_return (0,
2274 "`stats_set_poller_delay' API call failed, rv=%d:%U",
2275 (int) rv, format_vnet_api_errno, rv);
2281 return clib_error_return (0, "unknown input '%U'",
2282 format_unformat_error, input);
/* Register stats_config() to handle the "stats" startup-config section. */
2288 /* stats { ... } configuration. */
2291 * @cfgcmd{interval, <seconds>}
2292 * Configure stats poller delay to be @c seconds.
2295 VLIB_CONFIG_FUNCTION (stats_config, "stats");
2298 vl_api_stats_get_poller_delay_t_handler
2299 (vl_api_stats_get_poller_delay_t * mp)
2301 stats_main_t *sm = &stats_main;
2302 vl_api_registration_t *reg;
2303 reg = vl_api_client_index_to_registration (mp->client_index);
2306 vl_api_stats_get_poller_delay_reply_t *rmp;
2308 rmp = vl_msg_api_alloc (sizeof (*rmp));
2309 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
2310 rmp->context = mp->context;
2312 rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2314 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Stats pthread main loop: block all signals, name the thread, then
 * repeatedly sleep for the configured interval and run each collector
 * whose client-registration pool is non-empty.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2319 stats_thread_fn (void *arg)
2321 stats_main_t *sm = &stats_main;
2322 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2323 vlib_thread_main_t *tm = vlib_get_thread_main ();
2325 /* stats thread wants no signals. */
2329 pthread_sigmask (SIG_SETMASK, &s, 0);
2332 if (vec_len (tm->thread_prefix))
2333 vlib_set_thread_name ((char *)
2334 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2336 clib_mem_set_heap (w->thread_mheap);
2340 ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
/* skip all collectors when no client has enabled polling */
2343 if (!(sm->enable_poller))
2348 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2349 do_combined_per_interface_counters (sm);
2352 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2353 do_simple_per_interface_counters (sm);
2355 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2356 do_ip4_fib_counters (sm);
2358 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2359 do_ip6_fib_counters (sm);
2361 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2362 do_ip4_mfib_counters (sm);
2364 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2365 do_ip6_mfib_counters (sm);
2367 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2368 do_ip4_nbr_counters (sm);
2370 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2371 do_ip6_nbr_counters (sm);
2373 if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
2374 do_udp_encap_counters (sm);
/*
 * Fan a simple-interface-counters message out to every registered
 * client.  The "reg_prev" dance reuses the original message for the
 * final client and sends a fresh copy to each earlier one; clients
 * whose registration is gone are cleared from the stat.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2379 vl_api_vnet_interface_simple_counters_t_handler
2380 (vl_api_vnet_interface_simple_counters_t * mp)
2382 vpe_client_registration_t *clients, client;
2383 stats_main_t *sm = &stats_main;
2384 vl_api_registration_t *reg, *reg_prev = NULL;
2385 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2389 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2392 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2393 ~0 /*flag for all */ );
2395 for (i = 0; i < vec_len (clients); i++)
2397 client = clients[i];
2398 reg = vl_api_client_index_to_registration (client.client_index);
2401 if (reg_prev && vl_api_can_send_msg (reg_prev))
2403 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2404 clib_memcpy (mp_copy, mp, mp_size);
2405 vl_api_send_msg (reg_prev, (u8 *) mp);
/* registration vanished: drop this client from the stat */
2413 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2414 client.client_index);
2421 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
/* last client gets the original message (no copy needed) */
2424 if (reg_prev && vl_api_can_send_msg (reg_prev))
2426 vl_api_send_msg (reg_prev, (u8 *) mp);
2430 vl_msg_api_free (mp);
/*
 * Fan an ip4-fib-counters message out to every client registered for
 * IDX_IP4_FIB_COUNTERS (copy for all but the last, original for the
 * last); stale registrations are cleared and may disable the poller.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2435 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2437 stats_main_t *sm = &stats_main;
2438 vl_api_registration_t *reg, *reg_prev = NULL;
2439 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2441 vpe_client_registration_t *clients, client;
2444 mp_size = sizeof (*mp_copy) +
2445 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2448 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2450 for (i = 0; i < vec_len (clients); i++)
2452 client = clients[i];
2453 reg = vl_api_client_index_to_registration (client.client_index);
2456 if (reg_prev && vl_api_can_send_msg (reg_prev))
2458 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2459 clib_memcpy (mp_copy, mp, mp_size);
2460 vl_api_send_msg (reg_prev, (u8 *) mp);
2467 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2468 ~0, client.client_index);
2474 if (reg_prev && vl_api_can_send_msg (reg_prev))
2476 vl_api_send_msg (reg_prev, (u8 *) mp);
2480 vl_msg_api_free (mp);
/*
 * Fan an ip4-neighbour-counters message out to every client registered
 * for IDX_IP4_NBR_COUNTERS (same copy-all-but-last pattern as above).
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2485 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2487 stats_main_t *sm = &stats_main;
2488 vl_api_registration_t *reg, *reg_prev = NULL;
2489 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2491 vpe_client_registration_t *clients, client;
2494 mp_size = sizeof (*mp_copy) +
2495 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2498 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2500 for (i = 0; i < vec_len (clients); i++)
2502 client = clients[i];
2503 reg = vl_api_client_index_to_registration (client.client_index);
2506 if (reg_prev && vl_api_can_send_msg (reg_prev))
2508 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2509 clib_memcpy (mp_copy, mp, mp_size);
2510 vl_api_send_msg (reg_prev, (u8 *) mp);
2517 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2518 ~0, client.client_index);
2525 if (reg_prev && vl_api_can_send_msg (reg_prev))
2527 vl_api_send_msg (reg_prev, (u8 *) mp);
2531 vl_msg_api_free (mp);
/*
 * Fan an ip6-fib-counters message out to every client registered for
 * IDX_IP6_FIB_COUNTERS (same copy-all-but-last pattern as above).
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2536 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2538 stats_main_t *sm = &stats_main;
2539 vl_api_registration_t *reg, *reg_prev = NULL;
2540 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2542 vpe_client_registration_t *clients, client;
2545 mp_size = sizeof (*mp_copy) +
2546 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2549 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2551 for (i = 0; i < vec_len (clients); i++)
2553 client = clients[i];
2554 reg = vl_api_client_index_to_registration (client.client_index);
2557 if (reg_prev && vl_api_can_send_msg (reg_prev))
2559 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2560 clib_memcpy (mp_copy, mp, mp_size);
2561 vl_api_send_msg (reg_prev, (u8 *) mp);
2568 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2569 ~0, client.client_index);
2576 if (reg_prev && vl_api_can_send_msg (reg_prev))
2578 vl_api_send_msg (reg_prev, (u8 *) mp);
2582 vl_msg_api_free (mp);
/*
 * Fan an ip6-neighbour-counters message out to every client registered
 * for IDX_IP6_NBR_COUNTERS (same copy-all-but-last pattern as above).
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2587 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2589 stats_main_t *sm = &stats_main;
2590 vl_api_registration_t *reg, *reg_prev = NULL;
2591 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2593 vpe_client_registration_t *clients, client;
2596 mp_size = sizeof (*mp_copy) +
2597 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2600 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2602 for (i = 0; i < vec_len (clients); i++)
2604 client = clients[i];
2605 reg = vl_api_client_index_to_registration (client.client_index);
2608 if (reg_prev && vl_api_can_send_msg (reg_prev))
2610 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2611 clib_memcpy (mp_copy, mp, mp_size);
2612 vl_api_send_msg (reg_prev, (u8 *) mp);
2619 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2620 ~0, client.client_index);
2627 if (reg_prev && vl_api_can_send_msg (reg_prev))
2629 vl_api_send_msg (reg_prev, (u8 *) mp);
2633 vl_msg_api_free (mp);
/*
 * (Un)register a client for udp-encap counter streaming and send the
 * reply; if the client's registration is gone, clear it from the stat
 * and recompute the poller-enable flag.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2638 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2640 stats_main_t *sm = &stats_main;
2641 vpe_client_registration_t rp;
2642 vl_api_want_udp_encap_stats_reply_t *rmp;
2645 vl_api_registration_t *reg;
2648 fib = ~0; //Using same mechanism as _per_interface_
2649 rp.client_index = mp->client_index;
2650 rp.client_pid = mp->pid;
2652 handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2655 reg = vl_api_client_index_to_registration (mp->client_index);
2659 sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2660 fib, mp->client_index);
2664 rmp = vl_msg_api_alloc (sizeof (*rmp));
2665 rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2666 rmp->context = mp->context;
2667 rmp->retval = retval;
2669 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Legacy "want everything" registration: (un)register the client for
 * all six classic counter streams at once, then reply.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2673 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2675 stats_main_t *sm = &stats_main;
2676 vpe_client_registration_t rp;
2677 vl_api_want_stats_reply_t *rmp;
2681 vl_api_registration_t *reg;
2683 item = ~0; //"ALL THE THINGS IN THE THINGS
2684 rp.client_index = mp->client_index;
2685 rp.client_pid = mp->pid;
2687 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2688 item, mp->enable_disable);
2690 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2691 item, mp->enable_disable);
2693 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2694 item, mp->enable_disable);
2696 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2697 item, mp->enable_disable);
2699 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2700 item, mp->enable_disable);
2702 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2703 item, mp->enable_disable);
2706 reg = vl_api_client_index_to_registration (mp->client_index);
2710 rmp = vl_msg_api_alloc (sizeof (*rmp));
2711 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2712 rmp->context = mp->context;
2713 rmp->retval = retval;
2715 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * (Un)register a client for simple interface counters (all interfaces,
 * swif == ~0) and send the reply.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2719 vl_api_want_interface_simple_stats_t_handler
2720 (vl_api_want_interface_simple_stats_t * mp)
2722 stats_main_t *sm = &stats_main;
2723 vpe_client_registration_t rp;
2724 vl_api_want_interface_simple_stats_reply_t *rmp;
2728 vl_api_registration_t *reg;
2730 swif = ~0; //Using same mechanism as _per_interface_
2731 rp.client_index = mp->client_index;
2732 rp.client_pid = mp->pid;
2734 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2735 mp->enable_disable);
2738 reg = vl_api_client_index_to_registration (mp->client_index);
2743 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2748 rmp = vl_msg_api_alloc (sizeof (*rmp));
2749 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2750 rmp->context = mp->context;
2751 rmp->retval = retval;
2753 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * (Un)register a client for ip4 FIB counters (all fibs, fib == ~0)
 * and send the reply.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2758 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2760 stats_main_t *sm = &stats_main;
2761 vpe_client_registration_t rp;
2762 vl_api_want_ip4_fib_stats_reply_t *rmp;
2765 vl_api_registration_t *reg;
2768 fib = ~0; //Using same mechanism as _per_interface_
2769 rp.client_index = mp->client_index;
2770 rp.client_pid = mp->pid;
2772 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2773 mp->enable_disable);
2776 reg = vl_api_client_index_to_registration (mp->client_index);
2780 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2781 fib, mp->client_index);
2785 rmp = vl_msg_api_alloc (sizeof (*rmp));
2786 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2787 rmp->context = mp->context;
2788 rmp->retval = retval;
2790 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * (Un)register a client for ip4 multicast FIB counters (all mfibs,
 * mfib == ~0) and send the reply.
 * NOTE(review): this chunk is truncated; interleaved lines are missing.
 */
2794 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2796 stats_main_t *sm = &stats_main;
2797 vpe_client_registration_t rp;
2798 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2801 vl_api_registration_t *reg;
2804 mfib = ~0; //Using same mechanism as _per_interface_
2805 rp.client_index = mp->client_index;
2806 rp.client_pid = mp->pid;
2808 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2809 mp->enable_disable);
2812 reg = vl_api_client_index_to_registration (mp->client_index);
2815 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2816 mfib, mp->client_index);
2820 rmp = vl_msg_api_alloc (sizeof (*rmp));
2821 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2822 rmp->context = mp->context;
2823 rmp->retval = retval;
2825 vl_api_send_msg (reg, (u8 *) rmp);
2829 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2831 stats_main_t *sm = &stats_main;
2832 vpe_client_registration_t rp;
2833 vl_api_want_ip4_fib_stats_reply_t *rmp;
2836 vl_api_registration_t *reg;
2839 fib = ~0; //Using same mechanism as _per_interface_
2840 rp.client_index = mp->client_index;
2841 rp.client_pid = mp->pid;
2843 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2844 mp->enable_disable);
2847 reg = vl_api_client_index_to_registration (mp->client_index);
2850 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2851 fib, mp->client_index);
2855 rmp = vl_msg_api_alloc (sizeof (*rmp));
2856 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2857 rmp->context = mp->context;
2858 rmp->retval = retval;
2860 vl_api_send_msg (reg, (u8 *) rmp);
2864 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2866 stats_main_t *sm = &stats_main;
2867 vpe_client_registration_t rp;
2868 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2871 vl_api_registration_t *reg;
2874 mfib = ~0; //Using same mechanism as _per_interface_
2875 rp.client_index = mp->client_index;
2876 rp.client_pid = mp->pid;
2878 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2879 mp->enable_disable);
2882 reg = vl_api_client_index_to_registration (mp->client_index);
2885 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2886 mfib, mp->client_index);
2890 rmp = vl_msg_api_alloc (sizeof (*rmp));
2891 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2892 rmp->context = mp->context;
2893 rmp->retval = retval;
2895 vl_api_send_msg (reg, (u8 *) rmp);
/* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/*
 * WANT_IP4_NBR_STATS handler.
 * NOTE(review): appears to be a deliberate no-op stub while neighbour
 * stats are broken (see FIXME above) — confirm against the full body.
 */
vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
/*
 * WANT_IP6_NBR_STATS handler.
 * NOTE(review): appears to be a deliberate no-op stub while neighbour
 * stats are broken (same FIXME as the IPv4 variant) — confirm.
 */
vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
2910 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2912 stats_main_t *sm = &stats_main;
2913 vnet_interface_main_t *im = sm->interface_main;
2914 vl_api_vnet_get_summary_stats_reply_t *rmp;
2915 vlib_combined_counter_main_t *cm;
2918 u64 total_pkts[VLIB_N_RX_TX];
2919 u64 total_bytes[VLIB_N_RX_TX];
2920 vl_api_registration_t *reg;
2922 reg = vl_api_client_index_to_registration (mp->client_index);
2926 rmp = vl_msg_api_alloc (sizeof (*rmp));
2927 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2928 rmp->context = mp->context;
2931 memset (total_pkts, 0, sizeof (total_pkts));
2932 memset (total_bytes, 0, sizeof (total_bytes));
2934 vnet_interface_counter_lock (im);
2936 vec_foreach (cm, im->combined_sw_if_counters)
2938 which = cm - im->combined_sw_if_counters;
2940 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2942 vlib_get_combined_counter (cm, i, &v);
2943 total_pkts[which] += v.packets;
2944 total_bytes[which] += v.bytes;
2947 vnet_interface_counter_unlock (im);
2949 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2950 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2951 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2952 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
2954 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2956 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Memory-API client delete callback.
 * Per-client teardown is intentionally disabled: stats registrations are
 * tracked per-stat (see sm->stats_registrations / stats_registration_hash
 * vectors in stats_init) rather than in one per-client pool, so the
 * original cleanup is retained below, commented out, for reference.
 */
stats_memclnt_delete_callback (u32 client_index)
vpe_client_stats_registration_t *rp;	/* unused while cleanup is disabled */
stats_main_t *sm = &stats_main;	/* unused while cleanup is disabled */
/* p = hash_get (sm->stats_registration_hash, client_index); */
/* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
/* pool_put (sm->stats_registrations, rp); */
/* hash_unset (sm->stats_registration_hash, client_index); */
/*
 * The bulk counter messages are built and consumed by hand (and marked
 * "bounce" in stats_init), so the auto-generated endian-swap and print
 * handlers are stubbed out with vl_noop_handler here before the message
 * handler table is instantiated.
 */
#define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
#define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
#define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
#define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
#define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
#define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
#define vl_api_map_stats_segment_t_print vl_noop_handler
2993 vl_api_map_stats_segment_t_handler (vl_api_map_stats_segment_t * mp)
2995 vl_api_map_stats_segment_reply_t *rmp;
2996 stats_main_t *sm = &stats_main;
2997 ssvm_private_t *ssvmp = &sm->stat_segment;
2998 vl_api_registration_t *regp;
2999 api_main_t *am = &api_main;
3001 vl_api_shm_elem_config_t *config = 0;
3002 vl_shmem_hdr_t *shmem_hdr;
3005 regp = vl_api_client_index_to_registration (mp->client_index);
3008 clib_warning ("API client disconnected");
3011 if (regp->registration_type != REGISTRATION_TYPE_SOCKET_SERVER)
3012 rv = VNET_API_ERROR_INVALID_REGISTRATION;
3014 rmp = vl_msg_api_alloc (sizeof (*rmp));
3015 rmp->_vl_msg_id = htons (VL_API_MAP_STATS_SEGMENT_REPLY);
3016 rmp->context = mp->context;
3017 rmp->retval = htonl (rv);
3019 vl_api_send_msg (regp, (u8 *) rmp);
3025 * We need the reply message to make it out the back door
3026 * before we send the magic fd message so force a flush
3028 cf = vl_api_registration_file (regp);
3029 cf->write_function (cf);
3031 /* Send the magic "here's your sign (aka fd)" socket message */
3032 vl_sock_api_send_fd_msg (cf->file_descriptor, ssvmp->fd);
/*
 * One-time stats module initialization, run during VLIB init (see the
 * VLIB_INIT_FUNCTION registration below): records main-struct pointers,
 * allocates the data-structure lock, installs the stats API message
 * handlers, marks the streamed counter messages as "bounce" (so the msg
 * infra does not free them out from under the stats thread), builds the
 * (msg_name, crc, message-id) table, and sizes the per-stat registration
 * vectors from stats.reg.
 */
static clib_error_t *
stats_init (vlib_main_t * vm)
stats_main_t *sm = &stats_main;
api_main_t *am = &api_main;
void *vlib_worker_thread_bootstrap_fn (void *arg);
sm->vnet_main = vnet_get_main ();
sm->interface_main = &vnet_get_main ()->interface_main;
/* Default polling interval for the stats collection thread. */
sm->stats_poll_interval_in_seconds = 10;
sm->data_structure_lock =
clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
CLIB_CACHE_LINE_BYTES);
memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
vl_msg_api_set_handlers(VL_API_##N, #n, \
vl_api_##n##_t_handler, \
vl_api_##n##_t_endian, \
vl_api_##n##_t_print, \
sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
/* tell the msg infra not to free these messages... */
am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
/*
 * Set up the (msg_name, crc, message-id) table
 */
setup_message_id_table (am);
/* One registration pool + client hash per stat index. */
vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
#define stats_reg(n) \
sm->stats_registrations[IDX_##n] = 0; \
sm->stats_registration_hash[IDX_##n] = 0;
#include <vpp/stats/stats.reg>
/* Hook stats_init into the VLIB init sequence. */
VLIB_INIT_FUNCTION (stats_init);
/*
 * Declare the dedicated stats collection thread; its main loop is
 * stats_thread_fn, and its data structures are not cloned per worker.
 */
VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
.function = stats_thread_fn,
.no_data_structure_clone = 1,
3100 * fd.io coding-style-patch-verification: ON
3103 * eval: (c-set-style "gnu")