/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #include "stats_to_be_deprecated.h"
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
22 #include <vnet/bier/bier_fmask.h>
23 #include <vnet/bier/bier_table.h>
24 #include <vnet/fib/fib_api.h>
28 stats_main_t stats_main;
30 #include <vnet/ip/ip.h>
32 #include <vpp/api/vpe_msg_enum.h>
35 #define f64_print(a,b)
37 #define vl_typedefs /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
41 #define vl_endianfun /* define message structures */
42 #include <vpp/api/vpe_all_api_h.h>
45 /* instantiate all the print functions we know about */
46 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
48 #include <vpp/api/vpe_all_api_h.h>
51 #define foreach_stats_msg \
52 _(WANT_STATS, want_stats) \
53 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
54 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
55 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
56 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
57 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
58 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
59 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
60 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
61 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
62 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
63 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
64 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
65 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
66 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
67 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
68 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
69 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
70 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
71 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats) \
72 _(WANT_BIER_NEIGHBOR_STATS, want_bier_neighbor_stats)
74 #define vl_msg_name_crc_list
75 #include <vpp/stats/stats.api.h>
76 #undef vl_msg_name_crc_list
79 setup_message_id_table (api_main_t * am)
82 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
83 foreach_vl_msg_name_crc_stats;
87 /* These constants ensure msg sizes <= 1024, aka ring allocation */
88 #define SIMPLE_COUNTER_BATCH_SIZE 126
89 #define COMBINED_COUNTER_BATCH_SIZE 63
90 #define IP4_FIB_COUNTER_BATCH_SIZE 48
91 #define IP6_FIB_COUNTER_BATCH_SIZE 30
92 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
93 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
94 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
95 #define BIER_NEIGHBOR_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_bier_neighbor_counter_t))
98 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
102 format_vnet_interface_combined_counters (u8 * s, va_list * args)
104 stats_main_t *sm = &stats_main;
105 vl_api_vnet_interface_combined_counters_t *mp =
106 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
109 u32 count, sw_if_index;
111 count = ntohl (mp->count);
112 sw_if_index = ntohl (mp->first_sw_if_index);
116 vp = (vlib_counter_t *) mp->data;
118 switch (mp->vnet_counter_type)
120 case VNET_INTERFACE_COUNTER_RX:
123 case VNET_INTERFACE_COUNTER_TX:
127 counter_name = "bogus";
130 for (i = 0; i < count; i++)
132 packets = clib_mem_unaligned (&vp->packets, u64);
133 packets = clib_net_to_host_u64 (packets);
134 bytes = clib_mem_unaligned (&vp->bytes, u64);
135 bytes = clib_net_to_host_u64 (bytes);
137 s = format (s, "%U.%s.packets %lld\n",
138 format_vnet_sw_if_index_name,
139 sm->vnet_main, sw_if_index, counter_name, packets);
140 s = format (s, "%U.%s.bytes %lld\n",
141 format_vnet_sw_if_index_name,
142 sm->vnet_main, sw_if_index, counter_name, bytes);
149 format_vnet_interface_simple_counters (u8 * s, va_list * args)
151 stats_main_t *sm = &stats_main;
152 vl_api_vnet_interface_simple_counters_t *mp =
153 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
155 u32 count, sw_if_index;
156 count = ntohl (mp->count);
157 sw_if_index = ntohl (mp->first_sw_if_index);
159 vp = (u64 *) mp->data;
162 switch (mp->vnet_counter_type)
164 case VNET_INTERFACE_COUNTER_DROP:
165 counter_name = "drop";
167 case VNET_INTERFACE_COUNTER_PUNT:
168 counter_name = "punt";
170 case VNET_INTERFACE_COUNTER_IP4:
171 counter_name = "ip4";
173 case VNET_INTERFACE_COUNTER_IP6:
174 counter_name = "ip6";
176 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
177 counter_name = "rx-no-buff";
179 case VNET_INTERFACE_COUNTER_RX_MISS:
180 counter_name = "rx-miss";
182 case VNET_INTERFACE_COUNTER_RX_ERROR:
183 counter_name = "rx-error (fifo-full)";
185 case VNET_INTERFACE_COUNTER_TX_ERROR:
186 counter_name = "tx-error (fifo-full)";
189 counter_name = "bogus";
192 for (i = 0; i < count; i++)
194 v = clib_mem_unaligned (vp, u64);
195 v = clib_net_to_host_u64 (v);
197 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
198 sm->vnet_main, sw_if_index, counter_name, v);
206 dslock (stats_main_t * sm, int release_hint, int tag)
209 data_structure_lock_t *l = sm->data_structure_lock;
211 if (PREDICT_FALSE (l == 0))
214 thread_index = vlib_get_thread_index ();
215 if (l->lock && l->thread_index == thread_index)
224 while (clib_atomic_test_and_set (&l->lock))
227 l->thread_index = thread_index;
232 stats_dslock_with_hint (int hint, int tag)
234 stats_main_t *sm = &stats_main;
235 dslock (sm, hint, tag);
239 dsunlock (stats_main_t * sm)
242 data_structure_lock_t *l = sm->data_structure_lock;
244 if (PREDICT_FALSE (l == 0))
247 thread_index = vlib_get_thread_index ();
248 ASSERT (l->lock && l->thread_index == thread_index);
254 CLIB_MEMORY_BARRIER ();
260 stats_dsunlock (int hint, int tag)
262 stats_main_t *sm = &stats_main;
266 static vpe_client_registration_t *
267 get_client_for_stat (u32 reg, u32 item, u32 client_index)
269 stats_main_t *sm = &stats_main;
270 vpe_client_stats_registration_t *registration;
273 /* Is there anything listening for item in that reg */
274 p = hash_get (sm->stats_registration_hash[reg], item);
279 /* If there is, is our client_index one of them */
280 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
281 p = hash_get (registration->client_hash, client_index);
286 return pool_elt_at_index (registration->clients, p[0]);
291 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
293 stats_main_t *sm = &stats_main;
294 vpe_client_stats_registration_t *registration;
295 vpe_client_registration_t *cr;
298 /* Is there anything listening for item in that reg */
299 p = hash_get (sm->stats_registration_hash[reg], item);
303 pool_get (sm->stats_registrations[reg], registration);
304 registration->item = item;
305 registration->client_hash = NULL;
306 registration->clients = NULL;
307 hash_set (sm->stats_registration_hash[reg], item,
308 registration - sm->stats_registrations[reg]);
312 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
315 p = hash_get (registration->client_hash, client->client_index);
319 pool_get (registration->clients, cr);
320 cr->client_index = client->client_index;
321 cr->client_pid = client->client_pid;
322 hash_set (registration->client_hash, cr->client_index,
323 cr - registration->clients);
326 return 1; //At least one client is doing something ... poll
330 clear_one_client (u32 reg_index, u32 reg, u32 item, u32 client_index)
332 stats_main_t *sm = &stats_main;
333 vpe_client_stats_registration_t *registration;
334 vpe_client_registration_t *client;
337 registration = pool_elt_at_index (sm->stats_registrations[reg], reg_index);
338 p = hash_get (registration->client_hash, client_index);
342 client = pool_elt_at_index (registration->clients, p[0]);
343 hash_unset (registration->client_hash, client->client_index);
344 pool_put (registration->clients, client);
346 /* Now check if that was the last client for that item */
347 if (0 == pool_elts (registration->clients))
349 hash_unset (sm->stats_registration_hash[reg], item);
350 hash_free (registration->client_hash);
351 pool_free (registration->clients);
352 pool_put (sm->stats_registrations[reg], registration);
358 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
360 stats_main_t *sm = &stats_main;
364 /* Clear the client first */
365 /* Is there anything listening for item in that reg */
366 p = hash_get (sm->stats_registration_hash[reg], item);
371 /* If there is, is our client_index one of them */
372 clear_one_client (p[0], reg, item, client_index);
376 /* Now check if that was the last item in any of the listened to stats */
377 for (i = 0; i < STATS_REG_N_IDX; i++)
379 elts += pool_elts (sm->stats_registrations[i]);
385 clear_client_for_all_stats (u32 client_index)
387 stats_main_t *sm = &stats_main;
388 u32 reg_index, item, reg;
392 vec_foreach_index(reg, sm->stats_registration_hash)
394 hash_foreach(item, reg_index, sm->stats_registration_hash[reg],
396 clear_one_client(reg_index, reg, item, client_index);
402 /* Now check if that was the last item in any of the listened to stats */
403 for (i = 0; i < STATS_REG_N_IDX; i++)
405 elts += pool_elts (sm->stats_registrations[i]);
410 static clib_error_t *
411 want_stats_reaper (u32 client_index)
413 stats_main_t *sm = &stats_main;
415 sm->enable_poller = clear_client_for_all_stats (client_index);
420 VL_MSG_API_REAPER_FUNCTION (want_stats_reaper);
424 * Return a copy of the clients list.
426 vpe_client_registration_t *
427 get_clients_for_stat (u32 reg, u32 item)
429 stats_main_t *sm = &stats_main;
430 vpe_client_registration_t *client, *clients = 0;
431 vpe_client_stats_registration_t *registration;
434 /* Is there anything listening for item in that reg */
435 p = hash_get (sm->stats_registration_hash[reg], item);
440 /* If there is, is our client_index one of them */
441 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
443 vec_reset_length (clients);
446 pool_foreach (client, registration->clients,
448 vec_add1 (clients, *client);}
456 clear_client_reg (u32 ** registrations)
458 /* When registrations[x] is a vector of pool indices
459 here is a good place to clean up the pools
461 #define stats_reg(n) vec_free(registrations[IDX_##n]);
462 #include <vpp/stats/stats.reg>
465 vec_free (registrations);
469 init_client_reg (u32 ** registrations)
473 Initialise the stats registrations for each
474 type of stat a client can register for as well as
475 a vector of "interested" indexes.
476 Initially this is a u32 of either sw_if_index or fib_index
477 but eventually this should migrate to a pool_index (u32)
478 with a type specific pool that can include more complex things
479 such as timing and structured events.
481 vec_validate (registrations, STATS_REG_N_IDX);
482 #define stats_reg(n) \
483 vec_reset_length(registrations[IDX_##n]);
484 #include <vpp/stats/stats.reg>
488 When registrations[x] is a vector of pool indices, here
489 is a good place to init the pools.
491 return registrations;
495 enable_all_client_reg (u32 ** registrations)
499 Enable all stats known by adding
500 ~0 to the index vector. Eventually this
501 should be deprecated.
503 #define stats_reg(n) \
504 vec_add1(registrations[IDX_##n], ~0);
505 #include <vpp/stats/stats.reg>
507 return registrations;
511 do_simple_interface_counters (stats_main_t * sm)
513 vl_api_vnet_interface_simple_counters_t *mp = 0;
514 vnet_interface_main_t *im = sm->interface_main;
515 api_main_t *am = sm->api_main;
516 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
517 svm_queue_t *q = shmem_hdr->vl_input_queue;
518 vlib_simple_counter_main_t *cm;
519 u32 items_this_message = 0;
524 * Prevent interface registration from expanding / moving the vectors...
525 * That tends never to happen, so we can hold this lock for a while.
527 vnet_interface_counter_lock (im);
529 vec_foreach (cm, im->sw_if_counters)
531 n_counts = vlib_simple_counter_n_counters (cm);
532 for (i = 0; i < n_counts; i++)
536 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
539 mp = vl_msg_api_alloc_as_if_client
540 (sizeof (*mp) + items_this_message * sizeof (v));
541 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
542 mp->vnet_counter_type = cm - im->sw_if_counters;
543 mp->first_sw_if_index = htonl (i);
545 vp = (u64 *) mp->data;
547 v = vlib_get_simple_counter (cm, i);
548 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
551 if (mp->count == items_this_message)
553 mp->count = htonl (items_this_message);
554 /* Send to the main thread... */
555 vl_msg_api_send_shmem (q, (u8 *) & mp);
561 vnet_interface_counter_unlock (im);
565 handle_client_registration (vpe_client_registration_t * client, u32 stat,
566 u32 item, int enable_disable)
568 stats_main_t *sm = &stats_main;
569 vpe_client_registration_t *rp, _rp;
571 rp = get_client_for_stat (stat, item, client->client_index);
574 if (enable_disable == 0)
576 if (!rp) // No client to disable
578 clib_warning ("pid %d: already disabled for stats...",
583 clear_client_for_stat (stat, item, client->client_index);
590 rp->client_index = client->client_index;
591 rp->client_pid = client->client_pid;
592 sm->enable_poller = set_client_for_stat (stat, item, rp);
597 /**********************************
598 * ALL Interface Combined stats - to be deprecated
599 **********************************/
602 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
605 vl_api_want_interface_combined_stats_t_handler
606 (vl_api_want_interface_combined_stats_t * mp)
608 stats_main_t *sm = &stats_main;
609 vpe_client_registration_t rp;
610 vl_api_want_interface_combined_stats_reply_t *rmp;
613 vl_api_registration_t *reg;
616 swif = ~0; //Using same mechanism as _per_interface_
617 rp.client_index = mp->client_index;
618 rp.client_pid = mp->pid;
620 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
624 reg = vl_api_client_index_to_registration (mp->client_index);
628 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
633 rmp = vl_msg_api_alloc (sizeof (*rmp));
634 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
635 rmp->context = mp->context;
636 rmp->retval = retval;
638 vl_api_send_msg (reg, (u8 *) rmp);
642 vl_api_vnet_interface_combined_counters_t_handler
643 (vl_api_vnet_interface_combined_counters_t * mp)
645 vpe_client_registration_t *clients, client;
646 stats_main_t *sm = &stats_main;
647 vl_api_registration_t *reg, *reg_prev = NULL;
648 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
652 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
655 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
656 ~0 /*flag for all */ );
658 for (i = 0; i < vec_len (clients); i++)
661 reg = vl_api_client_index_to_registration (client.client_index);
664 if (reg_prev && vl_api_can_send_msg (reg_prev))
666 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
667 clib_memcpy (mp_copy, mp, mp_size);
668 vl_api_send_msg (reg_prev, (u8 *) mp);
676 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
679 if (reg_prev && vl_api_can_send_msg (reg_prev))
681 vl_api_send_msg (reg_prev, (u8 *) mp);
685 vl_msg_api_free (mp);
690 do_combined_interface_counters (stats_main_t * sm)
692 vl_api_vnet_interface_combined_counters_t *mp = 0;
693 vnet_interface_main_t *im = sm->interface_main;
694 api_main_t *am = sm->api_main;
695 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
696 svm_queue_t *q = shmem_hdr->vl_input_queue;
697 vlib_combined_counter_main_t *cm;
698 u32 items_this_message = 0;
699 vlib_counter_t v, *vp = 0;
702 vnet_interface_counter_lock (im);
704 vec_foreach (cm, im->combined_sw_if_counters)
706 n_counts = vlib_combined_counter_n_counters (cm);
707 for (i = 0; i < n_counts; i++)
711 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
714 mp = vl_msg_api_alloc_as_if_client
715 (sizeof (*mp) + items_this_message * sizeof (v));
716 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
717 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
718 mp->first_sw_if_index = htonl (i);
720 vp = (vlib_counter_t *) mp->data;
722 vlib_get_combined_counter (cm, i, &v);
723 clib_mem_unaligned (&vp->packets, u64)
724 = clib_host_to_net_u64 (v.packets);
725 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
728 if (mp->count == items_this_message)
730 mp->count = htonl (items_this_message);
731 /* Send to the main thread... */
732 vl_msg_api_send_shmem (q, (u8 *) & mp);
738 vnet_interface_counter_unlock (im);
741 /**********************************
742 * Per Interface Combined stats
743 **********************************/
745 /* Request from client registering interfaces it wants */
747 vl_api_want_per_interface_combined_stats_t_handler
748 (vl_api_want_per_interface_combined_stats_t * mp)
750 stats_main_t *sm = &stats_main;
751 vpe_client_registration_t rp;
752 vl_api_want_per_interface_combined_stats_reply_t *rmp;
753 vlib_combined_counter_main_t *cm;
756 vl_api_registration_t *reg;
757 u32 i, swif, num = 0;
759 num = ntohl (mp->num);
762 * Validate sw_if_indexes before registering
764 for (i = 0; i < num; i++)
766 swif = ntohl (mp->sw_ifs[i]);
769 * Check its a real sw_if_index that the client is allowed to see
773 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
775 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
781 for (i = 0; i < num; i++)
783 swif = ntohl (mp->sw_ifs[i]);
785 rp.client_index = mp->client_index;
786 rp.client_pid = mp->pid;
787 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
788 swif, ntohl (mp->enable_disable));
792 reg = vl_api_client_index_to_registration (mp->client_index);
795 for (i = 0; i < num; i++)
797 swif = ntohl (mp->sw_ifs[i]);
800 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
806 rmp = vl_msg_api_alloc (sizeof (*rmp));
807 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
808 rmp->context = mp->context;
809 rmp->retval = retval;
811 vl_api_send_msg (reg, (u8 *) rmp);
814 /* Per Interface Combined distribution to client */
816 do_combined_per_interface_counters (stats_main_t * sm)
818 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
819 vnet_interface_main_t *im = sm->interface_main;
820 api_main_t *am = sm->api_main;
821 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
822 vl_api_registration_t *vl_reg;
823 vlib_combined_counter_main_t *cm;
824 vl_api_vnet_combined_counter_t *vp = 0;
827 vpe_client_stats_registration_t *reg;
828 vpe_client_registration_t *client;
829 u32 *sw_if_index = 0;
831 vnet_interface_counter_lock (im);
833 vec_reset_length (sm->regs_tmp);
837 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
838 ({ vec_add1 (sm->regs_tmp, reg); }));
841 for (i = 0; i < vec_len (sm->regs_tmp); i++)
843 reg = sm->regs_tmp[i];
846 vnet_interface_counter_unlock (im);
847 do_combined_interface_counters (sm);
848 vnet_interface_counter_lock (im);
851 vec_reset_length (sm->clients_tmp);
854 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
858 for (j = 0; j < vec_len (sm->clients_tmp); j++)
860 client = sm->clients_tmp[j];
862 vl_reg = vl_api_client_index_to_registration (client->client_index);
864 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
868 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
869 reg->item, client->client_index);
872 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
873 clib_memset (mp, 0, sizeof (*mp));
876 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
879 * count will eventually be used to optimise the batching
880 * of per client messages for each stat. For now setting this to 1 then
881 * iterate. This will not affect API.
883 * FIXME instead of enqueueing here, this should be sent to a batch
884 * storer for per-client transmission. Each "mp" sent would be a single entry
885 * and if a client is listening to other sw_if_indexes for same, it would be
886 * appended to that *mp
890 * - capturing the timestamp of the counters "when VPP knew them" is important.
891 * Less so is that the timing of the delivery to the control plane be in the same
894 * i.e. As long as the control plane can delta messages from VPP and work out
895 * velocity etc based on the timestamp, it can do so in a more "batch mode".
897 * It would be beneficial to keep a "per-client" message queue, and then
898 * batch all the stat messages for a client into one message, with
899 * discrete timestamps.
901 * Given this particular API is for "per interface" one assumes that the scale
902 * is less than the ~0 case, which the prior API is suited for.
906 * 1 message per api call for now
908 mp->count = htonl (1);
909 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
911 vp = (vl_api_vnet_combined_counter_t *) mp->data;
912 vp->sw_if_index = htonl (reg->item);
914 im = &vnet_get_main ()->interface_main;
917 cm = im->combined_sw_if_counters + X; \
918 vlib_get_combined_counter (cm, reg->item, &v); \
919 clib_mem_unaligned (&vp->x##_packets, u64) = \
920 clib_host_to_net_u64 (v.packets); \
921 clib_mem_unaligned (&vp->x##_bytes, u64) = \
922 clib_host_to_net_u64 (v.bytes);
925 _(VNET_INTERFACE_COUNTER_RX, rx);
926 _(VNET_INTERFACE_COUNTER_TX, tx);
927 _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
928 _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
929 _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
930 _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
931 _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
932 _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
936 vl_api_send_msg (vl_reg, (u8 *) mp);
940 vnet_interface_counter_unlock (im);
943 /**********************************
944 * Per Interface simple stats
945 **********************************/
947 /* Request from client registering interfaces it wants */
949 vl_api_want_per_interface_simple_stats_t_handler
950 (vl_api_want_per_interface_simple_stats_t * mp)
952 stats_main_t *sm = &stats_main;
953 vpe_client_registration_t rp;
954 vl_api_want_per_interface_simple_stats_reply_t *rmp;
955 vlib_simple_counter_main_t *cm;
958 vl_api_registration_t *reg;
959 u32 i, swif, num = 0;
961 num = ntohl (mp->num);
963 for (i = 0; i < num; i++)
965 swif = ntohl (mp->sw_ifs[i]);
967 /* Check its a real sw_if_index that the client is allowed to see */
970 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
972 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
978 for (i = 0; i < num; i++)
980 swif = ntohl (mp->sw_ifs[i]);
982 rp.client_index = mp->client_index;
983 rp.client_pid = mp->pid;
984 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
985 swif, ntohl (mp->enable_disable));
989 reg = vl_api_client_index_to_registration (mp->client_index);
991 /* Client may have disconnected abruptly, clean up */
994 for (i = 0; i < num; i++)
996 swif = ntohl (mp->sw_ifs[i]);
998 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
1006 rmp = vl_msg_api_alloc (sizeof (*rmp));
1007 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
1008 rmp->context = mp->context;
1009 rmp->retval = retval;
1011 vl_api_send_msg (reg, (u8 *) rmp);
1014 /* Per Interface Simple distribution to client */
1016 do_simple_per_interface_counters (stats_main_t * sm)
1018 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
1019 vnet_interface_main_t *im = sm->interface_main;
1020 api_main_t *am = sm->api_main;
1021 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1022 vl_api_registration_t *vl_reg;
1023 vlib_simple_counter_main_t *cm;
1025 vpe_client_stats_registration_t *reg;
1026 vpe_client_registration_t *client;
1027 u32 timestamp, count;
1028 vl_api_vnet_simple_counter_t *vp = 0;
1031 vnet_interface_counter_lock (im);
1033 vec_reset_length (sm->regs_tmp);
1037 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
1038 ({ vec_add1 (sm->regs_tmp, reg); }));
1041 for (i = 0; i < vec_len (sm->regs_tmp); i++)
1043 reg = sm->regs_tmp[i];
1044 if (reg->item == ~0)
1046 vnet_interface_counter_unlock (im);
1047 do_simple_interface_counters (sm);
1048 vnet_interface_counter_lock (im);
1051 vec_reset_length (sm->clients_tmp);
1054 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
1058 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1060 client = sm->clients_tmp[j];
1061 vl_reg = vl_api_client_index_to_registration (client->client_index);
1063 /* Client may have disconnected abrubtly, clean up */
1067 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1068 reg->item, client->client_index);
1072 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1073 clib_memset (mp, 0, sizeof (*mp));
1074 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1077 * count will eventually be used to optimise the batching
1078 * of per client messages for each stat. For now setting this to 1 then
1079 * iterate. This will not affect API.
1081 * FIXME instead of enqueueing here, this should be sent to a batch
1082 * storer for per-client transmission. Each "mp" sent would be a single entry
1083 * and if a client is listening to other sw_if_indexes for same, it would be
1084 * appended to that *mp
1088 * - capturing the timestamp of the counters "when VPP knew them" is important.
1089 * Less so is that the timing of the delivery to the control plane be in the same
1092 * i.e. As long as the control plane can delta messages from VPP and work out
1093 * velocity etc based on the timestamp, it can do so in a more "batch mode".
1095 * It would be beneficial to keep a "per-client" message queue, and then
1096 * batch all the stat messages for a client into one message, with
1097 * discrete timestamps.
1099 * Given this particular API is for "per interface" one assumes that the scale
1100 * is less than the ~0 case, which the prior API is suited for.
1104 * 1 message per api call for now
1106 mp->count = htonl (1);
1107 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1108 vp = (vl_api_vnet_simple_counter_t *) mp->data;
1110 vp->sw_if_index = htonl (reg->item);
1112 // VNET_INTERFACE_COUNTER_DROP
1113 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1114 v = vlib_get_simple_counter (cm, reg->item);
1115 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1117 // VNET_INTERFACE_COUNTER_PUNT
1118 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1119 v = vlib_get_simple_counter (cm, reg->item);
1120 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1122 // VNET_INTERFACE_COUNTER_IP4
1123 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1124 v = vlib_get_simple_counter (cm, reg->item);
1125 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1127 //VNET_INTERFACE_COUNTER_IP6
1128 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1129 v = vlib_get_simple_counter (cm, reg->item);
1130 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1132 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1133 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1134 v = vlib_get_simple_counter (cm, reg->item);
1135 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1136 clib_host_to_net_u64 (v);
1138 //VNET_INTERFACE_COUNTER_RX_MISS
1139 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1140 v = vlib_get_simple_counter (cm, reg->item);
1141 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1143 //VNET_INTERFACE_COUNTER_RX_ERROR
1144 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1145 v = vlib_get_simple_counter (cm, reg->item);
1146 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1148 //VNET_INTERFACE_COUNTER_TX_ERROR
1149 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1150 v = vlib_get_simple_counter (cm, reg->item);
1151 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1153 //VNET_INTERFACE_COUNTER_MPLS
1154 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1155 v = vlib_get_simple_counter (cm, reg->item);
1156 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1158 vl_api_send_msg (vl_reg, (u8 *) mp);
1162 vnet_interface_counter_unlock (im);
1165 /**********************************
1167 **********************************/
1170 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1172 struct timespec _req, *req = &_req;
1173 struct timespec _rem, *rem = &_rem;
1176 req->tv_nsec = nsec;
1179 if (nanosleep (req, rem) == 0)
1184 clib_unix_warning ("nanosleep");
1190 * @brief The context passed when collecting adjacency counters
1192 typedef struct ip4_nbr_stats_ctx_t_
1195 * The SW IF index all these adjs belong to
1200 * A vector of ip4 nbr counters
1202 vl_api_ip4_nbr_counter_t *counters;
1203 } ip4_nbr_stats_ctx_t;
1205 static adj_walk_rc_t
1206 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1208 vl_api_ip4_nbr_counter_t *vl_counter;
1209 vlib_counter_t adj_counter;
1210 ip4_nbr_stats_ctx_t *ctx;
1211 ip_adjacency_t *adj;
1214 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
1216 if (0 != adj_counter.packets)
1218 vec_add2 (ctx->counters, vl_counter, 1);
1221 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1222 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1223 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1224 vl_counter->link_type = adj->ia_link;
1226 return (ADJ_WALK_RC_CONTINUE);
1229 #define MIN(x,y) (((x)<(y))?(x):(y))
1232 send_and_pause (stats_main_t * sm, svm_queue_t * q, u8 * mp)
1237 pause = svm_queue_is_full (q);
1239 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1240 svm_queue_unlock (q);
1244 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1245 STATS_RELEASE_DELAY_NS);
1249 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1251 api_main_t *am = sm->api_main;
1252 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1253 svm_queue_t *q = shmem_hdr->vl_input_queue;
1254 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1258 * If the walk context has counters, which may be left over from the last
1259 * suspend, then we continue from there.
1261 while (0 != vec_len (ctx->counters))
1263 u32 n_items = MIN (vec_len (ctx->counters),
1264 IP4_FIB_COUNTER_BATCH_SIZE);
1267 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1269 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1272 (vl_api_ip4_nbr_counter_t)));
1273 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1274 mp->count = ntohl (n_items);
1275 mp->sw_if_index = ntohl (ctx->sw_if_index);
1280 * copy the counters from the back of the context, then we can easily
1281 * 'erase' them by resetting the vector length.
1282 * The order we push the stats to the caller is not important.
1285 &ctx->counters[vec_len (ctx->counters) - n_items],
1286 n_items * sizeof (*ctx->counters));
1288 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1293 send_and_pause (sm, q, (u8 *) & mp);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 */

/*
 * do_ip4_nbr_counters: for every software interface, walk its IP4 neighbour
 * adjacencies under the data-structure lock (collecting per-adjacency
 * counters into ctx.counters via a callback not visible here), then ship any
 * collected counters with ip4_nbr_ship().  ctx.counters is expected empty at
 * the start of each interface.
 */
1298 do_ip4_nbr_counters (stats_main_t * sm)
1300 vnet_main_t *vnm = vnet_get_main ();
1301 vnet_interface_main_t *im = &vnm->interface_main;
1302 vnet_sw_interface_t *si;
1304 ip4_nbr_stats_ctx_t ctx = {
1310 pool_foreach (si, im->sw_interfaces,
1313 * update the interface we are now concerned with
1315 ctx.sw_if_index = si->sw_if_index;
1318 * we are about to walk another interface, so we shouldn't have any pending
1321 ASSERT(ctx.counters == NULL);
1324 * visit each neighbour adjacency on the interface and collect
1325 * its current stats.
1326 * Because we hold the lock the walk is synchronous, so safe to routing
1327 * updates. It's limited in work by the number of adjacenies on an
1328 * interface, which is typically not huge.
1330 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1331 adj_nbr_walk (si->sw_if_index,
1338 * if this interface has some adjacencies with counters then ship them,
1339 * else continue to the next interface.
1341 if (NULL != ctx.counters)
1343 ip4_nbr_ship(sm, &ctx);

/*
 * Context carried through the IP6 neighbour adjacency walk: the interface
 * the adjacencies belong to plus a vector of accumulated API counters.
 */
1350 * @brief The context passed when collecting adjacency counters
1352 typedef struct ip6_nbr_stats_ctx_t_
1355 * The SW IF index all these adjs belong to
1360 * A vector of ip6 nbr counters
1362 vl_api_ip6_nbr_counter_t *counters;
1363 } ip6_nbr_stats_ctx_t;

/*
 * ip6_nbr_stats_cb: adjacency-walk callback.  Reads the combined counter for
 * adjacency 'ai' and, when at least one packet was seen, appends a
 * host-to-net converted counter record (next-hop address, link type) to
 * ctx->counters.  Always continues the walk.
 */
1365 static adj_walk_rc_t
1366 ip6_nbr_stats_cb (adj_index_t ai,
1369 vl_api_ip6_nbr_counter_t *vl_counter;
1370 vlib_counter_t adj_counter;
1371 ip6_nbr_stats_ctx_t *ctx;
1372 ip_adjacency_t *adj;
1375 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
1377 if (0 != adj_counter.packets)
1379 vec_add2(ctx->counters, vl_counter, 1);
1382 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1383 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1384 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1385 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1386 vl_counter->link_type = adj->ia_link;
1388 return (ADJ_WALK_RC_CONTINUE);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 */

/*
 * NOTE(review): classic MIN macro — evaluates each argument twice, so never
 * call it with side-effecting arguments (e.g. MIN(i++, n)).  All uses in
 * this file pass pure expressions, so it is safe here.
 */
1391 #define MIN(x,y) (((x)<(y))?(x):(y))

/*
 * ip6_nbr_ship: IP6 twin of ip4_nbr_ship — batches ctx->counters into
 * VL_API_VNET_IP6_NBR_COUNTERS messages (at most IP6_FIB_COUNTER_BATCH_SIZE
 * each), copying from the back of the vector and shrinking it, then sends
 * via send_and_pause() under the data-structure lock.
 */
1394 ip6_nbr_ship (stats_main_t * sm,
1395 ip6_nbr_stats_ctx_t *ctx)
1397 api_main_t *am = sm->api_main;
1398 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1399 svm_queue_t *q = shmem_hdr->vl_input_queue;
1400 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1404 * If the walk context has counters, which may be left over from the last
1405 * suspend, then we continue from there.
1407 while (0 != vec_len(ctx->counters))
1409 u32 n_items = MIN (vec_len (ctx->counters),
1410 IP6_FIB_COUNTER_BATCH_SIZE);
1413 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1415 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1418 (vl_api_ip6_nbr_counter_t)));
1419 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1420 mp->count = ntohl (n_items);
1421 mp->sw_if_index = ntohl (ctx->sw_if_index);
1426 * copy the counters from the back of the context, then we can easily
1427 * 'erase' them by resetting the vector length.
1428 * The order we push the stats to the caller is not important.
1431 &ctx->counters[vec_len (ctx->counters) - n_items],
1432 n_items * sizeof (*ctx->counters));
1434 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1439 send_and_pause(sm, q, (u8 *) & mp);

/*
 * do_ip6_nbr_counters: IP6 twin of do_ip4_nbr_counters — per interface,
 * walk neighbour adjacencies under the lock, then ship collected counters.
 */
1444 do_ip6_nbr_counters (stats_main_t * sm)
1446 vnet_main_t *vnm = vnet_get_main ();
1447 vnet_interface_main_t *im = &vnm->interface_main;
1448 vnet_sw_interface_t *si;
1450 ip6_nbr_stats_ctx_t ctx = {
1456 pool_foreach (si, im->sw_interfaces,
1459 * update the interface we are now concerned with
1461 ctx.sw_if_index = si->sw_if_index;
1464 * we are about to walk another interface, so we shouldn't have any pending
1467 ASSERT(ctx.counters == NULL);
1470 * visit each neighbour adjacency on the interface and collect
1471 * its current stats.
1472 * Because we hold the lock the walk is synchronous, so safe to routing
1473 * updates. It's limited in work by the number of adjacenies on an
1474 * interface, which is typically not huge.
1476 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1477 adj_nbr_walk (si->sw_if_index,
1484 * if this interface has some adjacencies with counters then ship them,
1485 * else continue to the next interface.
1487 if (NULL != ctx.counters)
1489 ip6_nbr_ship(sm, &ctx);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 */

/*
 * do_ip4_fib_counters: snapshot every IP4 FIB's routes and relay their
 * load-balance counters to API clients in IP4_FIB_COUNTER_BATCH_SIZE-sized
 * VL_API_VNET_IP4_FIB_COUNTERS messages.  Cooperates with the main thread:
 * whenever data_structure_lock->release_hint is set, it records where it
 * was (start_at_fib_index), sleeps via ip46_fib_stats_delay, and restarts
 * the current message.  Flow: collect (fib, route) pairs under dslock from
 * the per-prefix-length hash tables, then per route fetch the DPO's
 * combined counter and emit it only if packets were seen.
 */
1496 do_ip4_fib_counters (stats_main_t * sm)
1498 ip4_main_t *im4 = &ip4_main;
1499 api_main_t *am = sm->api_main;
1500 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1501 svm_queue_t *q = shmem_hdr->vl_input_queue;
1505 do_ip46_fibs_t *do_fibs;
1506 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1507 u32 items_this_message;
1508 vl_api_ip4_fib_counter_t *ctrp = 0;
1509 u32 start_at_fib_index = 0;
1512 do_fibs = &sm->do_ip46_fibs;
1515 vec_reset_length (do_fibs->fibs);
1517 pool_foreach (fib, im4->fibs,
1518 ({vec_add1(do_fibs->fibs,fib);}));
1522 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1524 fib = do_fibs->fibs[j];
1525 /* We may have bailed out due to control-plane activity */
1526 while ((fib - im4->fibs) < start_at_fib_index)
1529 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1533 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1534 mp = vl_msg_api_alloc_as_if_client
1536 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1537 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1539 mp->vrf_id = ntohl (fib->ft_table_id);
1540 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1544 /* happens if the last FIB was empty... */
1545 ASSERT (mp->count == 0);
1546 mp->vrf_id = ntohl (fib->ft_table_id);
1549 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1551 vec_reset_length (do_fibs->ip4routes);
1552 vec_reset_length (do_fibs->results);
/* Gather routes from each per-mask-length dst-address hash table. */
1554 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1556 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1560 vec_reset_length (do_fibs->pvec);
1562 x.address_length = i;
1564 hash_foreach_pair (p, hash, (
1566 vec_add1 (do_fibs->pvec, p);}
1568 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1570 p = do_fibs->pvec[k];
1571 x.address.data_u32 = p->key;
1572 x.index = p->value[0];
1574 vec_add1 (do_fibs->ip4routes, x);
/* Main thread wants the lock: remember position, yield, restart message. */
1575 if (sm->data_structure_lock->release_hint)
1577 start_at_fib_index = fib - im4->fibs;
1579 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1580 STATS_RELEASE_DELAY_NS);
1582 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1588 vec_foreach (r, do_fibs->ip4routes)
1591 const dpo_id_t *dpo_id;
1594 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1595 index = (u32) dpo_id->dpoi_index;
1597 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1600 * If it has actually
1601 * seen at least one packet, send it.
1606 /* already in net byte order */
1607 ctrp->address = r->address.as_u32;
1608 ctrp->address_length = r->address_length;
1609 ctrp->packets = clib_host_to_net_u64 (c.packets);
1610 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* Batch full: finalize count, send (waiting out a full queue), realloc. */
1614 if (mp->count == items_this_message)
1616 mp->count = htonl (items_this_message);
1618 * If the main thread's input queue is stuffed,
1619 * drop the data structure lock (which the main thread
1620 * may want), and take a pause.
1623 if (svm_queue_is_full (q))
1626 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1627 svm_queue_unlock (q);
1629 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1630 STATS_RELEASE_DELAY_NS);
1633 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1634 svm_queue_unlock (q);
1636 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1637 mp = vl_msg_api_alloc_as_if_client
1639 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1640 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1642 mp->vrf_id = ntohl (fib->ft_table_id);
1643 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1645 } /* for each (mp or single) adj */
1646 if (sm->data_structure_lock->release_hint)
1648 start_at_fib_index = fib - im4->fibs;
1650 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1652 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1655 } /* vec_foreach (routes) */
1659 /* Flush any data from this fib */
1662 mp->count = htonl (mp->count);
1663 vl_msg_api_send_shmem (q, (u8 *) & mp);
1668 /* If e.g. the last FIB had no reportable routes, free the buffer */
1670 vl_msg_api_free (mp);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 */

/*
 * mfib_table_stats_walk_cb: mfib walk callback — append the entry's prefix
 * to the shared do_ip46_fibs.mroutes vector for later counter collection.
 */
1674 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1676 stats_main_t *sm = ctx;
1677 do_ip46_fibs_t *do_fibs;
1678 mfib_entry_t *entry;
1680 do_fibs = &sm->do_ip46_fibs;
1681 entry = mfib_entry_get (fei);
1683 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);

/*
 * do_ip4_mfib_counters: relay IP4 multicast-FIB replicate counters to API
 * clients in IP4_MFIB_COUNTER_BATCH_SIZE batches.  Per mfib: snapshot the
 * prefixes under dslock, then (because collection may suspend) re-look-up
 * each prefix, read the replicate DPO's combined counter, and emit
 * (group, source, group_length, packets, bytes) when packets were seen.
 * Skips prefixes that disappeared during a suspend (invalid mfei).
 */
1689 do_ip4_mfib_counters (stats_main_t * sm)
1691 ip4_main_t *im4 = &ip4_main;
1692 api_main_t *am = sm->api_main;
1693 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1694 svm_queue_t *q = shmem_hdr->vl_input_queue;
1697 do_ip46_fibs_t *do_fibs;
1698 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1699 u32 items_this_message;
1700 vl_api_ip4_mfib_counter_t *ctrp = 0;
1701 u32 start_at_mfib_index = 0;
1704 do_fibs = &sm->do_ip46_fibs;
1706 vec_reset_length (do_fibs->mfibs);
1708 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1711 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1713 mfib = do_fibs->mfibs[j];
1714 /* We may have bailed out due to control-plane activity */
1715 while ((mfib - im4->mfibs) < start_at_mfib_index)
1720 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1721 mp = vl_msg_api_alloc_as_if_client
1723 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1724 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1726 mp->vrf_id = ntohl (mfib->mft_table_id);
1727 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1731 /* happens if the last MFIB was empty... */
1732 ASSERT (mp->count == 0);
1733 mp->vrf_id = ntohl (mfib->mft_table_id);
1736 vec_reset_length (do_fibs->mroutes);
1739 * walk the table with table updates blocked
1741 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1743 mfib_table_walk (mfib->mft_index,
1744 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1747 vec_foreach (pfx, do_fibs->mroutes)
1749 const dpo_id_t *dpo_id;
1750 fib_node_index_t mfei;
1755 * re-lookup the entry, since we suspend during the collection
1757 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1759 if (FIB_NODE_INDEX_INVALID == mfei)
1762 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1763 index = (u32) dpo_id->dpoi_index;
1765 vlib_get_combined_counter (&replicate_main.repm_counters,
1766 dpo_id->dpoi_index, &c);
1768 * If it has seen at least one packet, send it.
1772 /* already in net byte order */
1773 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1774 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1775 ctrp->group_length = pfx->fp_len;
1776 ctrp->packets = clib_host_to_net_u64 (c.packets);
1777 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* Batch full: send it, spinning (with delays) while the queue is full. */
1781 if (mp->count == items_this_message)
1783 mp->count = htonl (items_this_message);
1785 * If the main thread's input queue is stuffed,
1786 * drop the data structure lock (which the main thread
1787 * may want), and take a pause.
1791 while (svm_queue_is_full (q))
1793 svm_queue_unlock (q);
1794 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1795 STATS_RELEASE_DELAY_NS);
1798 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1799 svm_queue_unlock (q);
1801 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1802 mp = vl_msg_api_alloc_as_if_client
1804 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1805 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1807 mp->vrf_id = ntohl (mfib->mft_table_id);
1808 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1813 /* Flush any data from this mfib */
1816 mp->count = htonl (mp->count);
1817 vl_msg_api_send_shmem (q, (u8 *) & mp);
1822 /* If e.g. the last FIB had no reportable routes, free the buffer */
1824 vl_msg_api_free (mp);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 */

/*
 * do_ip6_mfib_counters: IP6 twin of do_ip4_mfib_counters — batches IP6
 * multicast-FIB replicate counters into VL_API_VNET_IP6_MFIB_COUNTERS
 * messages (IP6_MFIB_COUNTER_BATCH_SIZE per message), 16-byte group/source
 * addresses instead of 4-byte.  Same suspend/re-lookup protocol.
 */
1828 do_ip6_mfib_counters (stats_main_t * sm)
1830 ip6_main_t *im6 = &ip6_main;
1831 api_main_t *am = sm->api_main;
1832 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1833 svm_queue_t *q = shmem_hdr->vl_input_queue;
1836 do_ip46_fibs_t *do_fibs;
1837 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1838 u32 items_this_message;
1839 vl_api_ip6_mfib_counter_t *ctrp = 0;
1840 u32 start_at_mfib_index = 0;
1843 do_fibs = &sm->do_ip46_fibs;
1845 vec_reset_length (do_fibs->mfibs);
1847 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1850 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1852 mfib = do_fibs->mfibs[j];
1853 /* We may have bailed out due to control-plane activity */
1854 while ((mfib - im6->mfibs) < start_at_mfib_index)
1859 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1860 mp = vl_msg_api_alloc_as_if_client
1862 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1863 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1865 mp->vrf_id = ntohl (mfib->mft_table_id);
1866 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1870 /* happens if the last MFIB was empty... */
1871 ASSERT (mp->count == 0);
1872 mp->vrf_id = ntohl (mfib->mft_table_id);
1875 vec_reset_length (do_fibs->mroutes);
1878 * walk the table with table updates blocked
1880 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1882 mfib_table_walk (mfib->mft_index,
1883 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1886 vec_foreach (pfx, do_fibs->mroutes)
1888 const dpo_id_t *dpo_id;
1889 fib_node_index_t mfei;
1894 * re-lookup the entry, since we suspend during the collection
1896 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1898 if (FIB_NODE_INDEX_INVALID == mfei)
1901 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1902 index = (u32) dpo_id->dpoi_index;
1904 vlib_get_combined_counter (&replicate_main.repm_counters,
1905 dpo_id->dpoi_index, &c);
1907 * If it has seen at least one packet, send it.
1911 /* already in net byte order */
1912 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1913 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1914 ctrp->group_length = pfx->fp_len;
1915 ctrp->packets = clib_host_to_net_u64 (c.packets);
1916 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* Batch full: send it, spinning (with delays) while the queue is full. */
1920 if (mp->count == items_this_message)
1922 mp->count = htonl (items_this_message);
1924 * If the main thread's input queue is stuffed,
1925 * drop the data structure lock (which the main thread
1926 * may want), and take a pause.
1930 while (svm_queue_is_full (q))
1932 svm_queue_unlock (q);
1933 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1934 STATS_RELEASE_DELAY_NS);
1937 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1938 svm_queue_unlock (q);
1940 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1941 mp = vl_msg_api_alloc_as_if_client
1943 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1944 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1946 mp->vrf_id = ntohl (mfib->mft_table_id);
1947 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1952 /* Flush any data from this mfib */
1955 mp->count = htonl (mp->count);
1956 vl_msg_api_send_shmem (q, (u8 *) & mp);
1961 /* If e.g. the last FIB had no reportable routes, free the buffer */
1963 vl_msg_api_free (mp);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing (including the opening of this
 * typedef).  Code text left byte-identical.
 */

/* Argument bundle for the bihash walk: target fib index + output vector. */
1969 ip6_route_t **routep;
1971 } add_routes_in_fib_arg_t;

/*
 * add_routes_in_fib: bihash key/value callback.  Long-jumps out of the walk
 * when the main thread requests the lock (release_hint).  For keys in the
 * requested fib (fib index in the upper half of key[2]), appends an
 * ip6_route_t (address, mask length from key[2] & 0xFF, entry index).
 */
1974 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1976 add_routes_in_fib_arg_t *ap = arg;
1977 stats_main_t *sm = ap->sm;
1979 if (sm->data_structure_lock->release_hint)
1980 clib_longjmp (&sm->jmp_buf, 1);
1982 if (kvp->key[2] >> 32 == ap->fib_index)
1984 ip6_address_t *addr;
1986 addr = (ip6_address_t *) kvp;
1987 vec_add2 (*ap->routep, r, 1);
1988 r->address = addr[0];
1989 r->address_length = kvp->key[2] & 0xFF;
1990 r->index = kvp->value;

/*
 * do_ip6_fib_counters: IP6 FIB counter relay.  Routes are enumerated from
 * the forwarding bihash via add_routes_in_fib under a setjmp guard — a
 * longjmp (lock contention) restarts the current fib after a delay.  Per
 * route, the load-balance combined counter is read and emitted when packets
 * were seen, batched into VL_API_VNET_IP6_FIB_COUNTERS messages.
 */
1995 do_ip6_fib_counters (stats_main_t * sm)
1997 ip6_main_t *im6 = &ip6_main;
1998 api_main_t *am = sm->api_main;
1999 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
2000 svm_queue_t *q = shmem_hdr->vl_input_queue;
2003 do_ip46_fibs_t *do_fibs;
2004 vl_api_vnet_ip6_fib_counters_t *mp = 0;
2005 u32 items_this_message;
2006 vl_api_ip6_fib_counter_t *ctrp = 0;
2007 u32 start_at_fib_index = 0;
2008 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
2009 add_routes_in_fib_arg_t _a, *a = &_a;
2012 do_fibs = &sm->do_ip46_fibs;
2014 vec_reset_length (do_fibs->fibs);
2016 pool_foreach (fib, im6->fibs,
2017 ({vec_add1(do_fibs->fibs,fib);}));
2021 for (i = 0; i < vec_len (do_fibs->fibs); i++)
2023 fib = do_fibs->fibs[i];
2024 /* We may have bailed out due to control-plane activity */
2025 while ((fib - im6->fibs) < start_at_fib_index)
2030 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2031 mp = vl_msg_api_alloc_as_if_client
2033 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2034 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2036 mp->vrf_id = ntohl (fib->ft_table_id);
2037 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2040 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2042 vec_reset_length (do_fibs->ip6routes);
2043 vec_reset_length (do_fibs->results);
2045 a->fib_index = fib - im6->fibs;
2046 a->routep = &do_fibs->ip6routes;
/* setjmp == 0: normal path, walk the bihash; longjmp lands in the (missing)
 * else branch which delays and restarts this fib. */
2049 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
2051 start_at_fib_index = fib - im6->fibs;
2052 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
2057 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2058 STATS_RELEASE_DELAY_NS);
2060 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2064 vec_foreach (r, do_fibs->ip6routes)
2068 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2071 * If it has actually
2072 * seen at least one packet, send it.
2076 /* already in net byte order */
2077 ctrp->address[0] = r->address.as_u64[0];
2078 ctrp->address[1] = r->address.as_u64[1];
2079 ctrp->address_length = (u8) r->address_length;
2080 ctrp->packets = clib_host_to_net_u64 (c.packets);
2081 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* Batch full: finalize count, send (waiting out a full queue), realloc. */
2085 if (mp->count == items_this_message)
2087 mp->count = htonl (items_this_message);
2089 * If the main thread's input queue is stuffed,
2090 * drop the data structure lock (which the main thread
2091 * may want), and take a pause.
2094 if (svm_queue_is_full (q))
2097 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2098 svm_queue_unlock (q);
2100 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2101 STATS_RELEASE_DELAY_NS);
2104 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2105 svm_queue_unlock (q);
2107 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2108 mp = vl_msg_api_alloc_as_if_client
2110 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2111 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2113 mp->vrf_id = ntohl (fib->ft_table_id);
2114 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2118 if (sm->data_structure_lock->release_hint)
2120 start_at_fib_index = fib - im6->fibs;
2122 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2124 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2127 } /* vec_foreach (routes) */
2131 /* Flush any data from this fib */
2134 mp->count = htonl (mp->count);
2135 vl_msg_api_send_shmem (q, (u8 *) & mp);
2140 /* If e.g. the last FIB had no reportable routes, free the buffer */
2142 vl_msg_api_free (mp);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 */

/* Walk context: vector of per-udp-encap counters accumulated by the walk. */
2145 typedef struct udp_encap_stats_walk_t_
2147 vl_api_udp_encap_counter_t *stats;
2148 } udp_encap_stats_walk_t;

/*
 * udp_encap_stats_walk_cb: append a stat record for udp-encap object 'uei'
 * (packets/bytes filled by udp_encap_get_stats) and continue the walk.
 */
2151 udp_encap_stats_walk_cb (index_t uei, void *arg)
2153 udp_encap_stats_walk_t *ctx = arg;
2154 vl_api_udp_encap_counter_t *stat;
2157 vec_add2 (ctx->stats, stat, 1);
2159 udp_encap_get_stats (uei, &stat->packets, &stat->bytes);
2161 return (WALK_CONTINUE);

/*
 * udp_encap_ship: same batching pattern as ip4/ip6_nbr_ship — drain
 * ctx->stats from the back in UDP_ENCAP_COUNTER_BATCH_SIZE batches into
 * VL_API_VNET_UDP_ENCAP_COUNTERS messages, sent via send_and_pause().
 */
2165 udp_encap_ship (udp_encap_stats_walk_t * ctx)
2167 vl_api_vnet_udp_encap_counters_t *mp;
2168 vl_shmem_hdr_t *shmem_hdr;
2176 shmem_hdr = am->shmem_hdr;
2177 q = shmem_hdr->vl_input_queue;
2180 * If the walk context has counters, which may be left over from the last
2181 * suspend, then we continue from there.
2183 while (0 != vec_len (ctx->stats))
2185 u32 n_items = MIN (vec_len (ctx->stats),
2186 UDP_ENCAP_COUNTER_BATCH_SIZE);
2189 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2191 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2194 (vl_api_udp_encap_counter_t)));
2195 mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2196 mp->count = ntohl (n_items);
2199 * copy the counters from the back of the context, then we can easily
2200 * 'erase' them by resetting the vector length.
2201 * The order we push the stats to the caller is not important.
2204 &ctx->stats[vec_len (ctx->stats) - n_items],
2205 n_items * sizeof (*ctx->stats));
2207 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
2212 send_and_pause (sm, q, (u8 *) & mp);

/*
 * do_udp_encap_counters: collect all udp-encap stats under the lock, then
 * ship them to registered clients.
 */
2217 do_udp_encap_counters (stats_main_t * sm)
2219 vl_api_udp_encap_counter_t *stat;
2221 udp_encap_stats_walk_t ctx = {
2225 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2226 udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
2229 udp_encap_ship (&ctx);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 */

/* Walk context: vector of per-BIER-fmask counters accumulated by the walk. */
2232 typedef struct bier_neighbor_stats_walk_t_
2234 vl_api_bier_neighbor_counter_t *stats;
2235 } bier_neighbor_stats_walk_t;

/*
 * bier_neighbor_stats_walk_cb: append a stat record for fmask 'bfmi' —
 * encodes its table id and path, then fills packets/bytes from
 * bier_fmask_get_stats.  Always continues the walk.
 */
2238 bier_neighbor_stats_walk_cb (index_t bfmi, void *arg)
2240 bier_neighbor_stats_walk_t *ctx = arg;
2241 vl_api_bier_neighbor_counter_t *stat;
2242 fib_route_path_encode_t rpath;
2243 bier_table_id_t btid;
2245 vec_add2 (ctx->stats, stat, 1);
2247 bier_fmask_encode (bfmi, &btid, &rpath);
2249 stat->tbl_id.bt_set = btid.bti_set;
2250 stat->tbl_id.bt_sub_domain = btid.bti_sub_domain;
2251 stat->tbl_id.bt_hdr_len_id = btid.bti_hdr_len;
2252 fib_api_path_encode (&rpath, &stat->path);
2253 bier_fmask_get_stats (bfmi, &stat->packets, &stat->bytes);
2255 return (WALK_CONTINUE);

/*
 * bier_neighbor_ship: same batching pattern as udp_encap_ship, with
 * BIER_NEIGHBOR_COUNTER_BATCH_SIZE batches of
 * VL_API_VNET_BIER_NEIGHBOR_COUNTERS messages.
 */
2259 bier_neighbor_ship (bier_neighbor_stats_walk_t * ctx)
2261 vl_api_vnet_bier_neighbor_counters_t *mp;
2262 vl_shmem_hdr_t *shmem_hdr;
2270 shmem_hdr = am->shmem_hdr;
2271 q = shmem_hdr->vl_input_queue;
2274 * If the walk context has counters, which may be left over from the last
2275 * suspend, then we continue from there.
2277 while (0 != vec_len (ctx->stats))
2279 u32 n_items = MIN (vec_len (ctx->stats),
2280 BIER_NEIGHBOR_COUNTER_BATCH_SIZE);
2283 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2285 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2288 (vl_api_bier_neighbor_counter_t)));
2289 mp->_vl_msg_id = ntohs (VL_API_VNET_BIER_NEIGHBOR_COUNTERS);
2290 mp->count = ntohl (n_items);
2293 * copy the counters from the back of the context, then we can easily
2294 * 'erase' them by resetting the vector length.
2295 * The order we push the stats to the caller is not important.
2298 &ctx->stats[vec_len (ctx->stats) - n_items],
2299 n_items * sizeof (*ctx->stats));
2301 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
2306 send_and_pause (sm, q, (u8 *) & mp);

/*
 * do_bier_neighbor_counters: collect all BIER fmask stats under the lock,
 * then ship them to registered clients.
 */
2311 do_bier_neighbor_counters (stats_main_t * sm)
2313 vl_api_bier_neighbor_counter_t *stat;
2315 bier_neighbor_stats_walk_t ctx = {
2319 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2320 bier_fmask_db_walk (bier_neighbor_stats_walk_cb, &ctx);
2323 bier_neighbor_ship (&ctx);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 */

/*
 * stats_set_poller_delay: set the stats poll interval (seconds).  Zero is
 * rejected with VNET_API_ERROR_INVALID_ARGUMENT; success path presumably
 * returns 0 (missing line) — TODO confirm.
 */
2327 stats_set_poller_delay (u32 poller_delay_sec)
2329 stats_main_t *sm = &stats_main;
2330 if (!poller_delay_sec)
2332 return VNET_API_ERROR_INVALID_ARGUMENT;
2336 sm->stats_poll_interval_in_seconds = poller_delay_sec;

/*
 * stats_config: startup-config handler for "stats { interval <seconds> }".
 * Unknown tokens produce an unformat error.
 */
2341 static clib_error_t *
2342 stats_config (vlib_main_t * vm, unformat_input_t * input)
2344 stats_main_t *sm = &stats_main;
2347 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2349 if (unformat (input, "interval %u", &sec))
2351 int rv = stats_set_poller_delay (sec);
2354 return clib_error_return (0,
2355 "`stats_set_poller_delay' API call failed, rv=%d:%U",
2356 (int) rv, format_vnet_api_errno, rv);
2361 return clib_error_return (0, "unknown input '%U'",
2362 format_unformat_error, input);

/* Register stats_config for the "stats" configuration section. */
2369 /* stats { ... } configuration. */
2372 * @cfgcmd{interval, <seconds>}
2373 * Configure stats poller delay to be @c seconds.
2376 VLIB_CONFIG_FUNCTION (stats_config, "stats");

/*
 * API handler: reply with the currently configured poll interval
 * (net byte order) to the requesting client.
 */
2379 vl_api_stats_get_poller_delay_t_handler
2380 (vl_api_stats_get_poller_delay_t * mp)
2382 stats_main_t *sm = &stats_main;
2383 vl_api_registration_t *reg;
2384 reg = vl_api_client_index_to_registration (mp->client_index);
2387 vl_api_stats_get_poller_delay_reply_t *rmp;
2389 rmp = vl_msg_api_alloc (sizeof (*rmp));
2390 rmp->_vl_msg_id = ntohs (VL_API_STATS_GET_POLLER_DELAY_REPLY);
2391 rmp->context = mp->context;
2393 rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2395 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines (the enclosing while-loop, for one) are
 * missing.  Code text left byte-identical.
 */

/*
 * stats_thread_fn: dedicated stats-poller pthread.  Blocks all signals,
 * switches to the worker's heap, names itself "<prefix>_stats", then
 * (presumably in a loop — the loop header is among the missing lines)
 * sleeps for the configured interval and, when polling is enabled, runs
 * each do_*_counters collector that has at least one registered client.
 */
2400 stats_thread_fn (void *arg)
2402 stats_main_t *sm = &stats_main;
2403 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2404 vlib_thread_main_t *tm = vlib_get_thread_main ();
2406 /* stats thread wants no signals. */
2410 pthread_sigmask (SIG_SETMASK, &s, 0);
2413 clib_mem_set_heap (w->thread_mheap);
2415 if (vec_len (tm->thread_prefix))
2416 vlib_set_thread_name ((char *)
2417 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2421 ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
2424 if (!(sm->enable_poller))
2428 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2429 do_combined_per_interface_counters (sm);
2432 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2433 do_simple_per_interface_counters (sm);
2435 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2436 do_ip4_fib_counters (sm);
2438 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2439 do_ip6_fib_counters (sm);
2441 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2442 do_ip4_mfib_counters (sm);
2444 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2445 do_ip6_mfib_counters (sm);
2447 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2448 do_ip4_nbr_counters (sm);
2450 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2451 do_ip6_nbr_counters (sm);
2453 if (pool_elts (sm->stats_registrations[IDX_BIER_NEIGHBOR_COUNTERS]))
2454 do_bier_neighbor_counters (sm);
/*
 * NOTE(review): extraction artifact — leading numerals are fused original
 * line numbers; interior lines are missing.  Code text left byte-identical.
 *
 * The five handlers below share one relay pattern: for each registered
 * client, send the previous client a COPY of the message (so the original
 * buffer can be reused), deregister clients whose registration has gone
 * stale, and finally send the original message to the last live client —
 * or free it if no client could take it.
 */

/* Relay VNET_INTERFACE_SIMPLE_COUNTERS to all registered clients; also
 * dumps the counters to stdout via fformat (grounded in the visible call —
 * presumably a debug path, TODO confirm placement against full source). */
2459 vl_api_vnet_interface_simple_counters_t_handler
2460 (vl_api_vnet_interface_simple_counters_t * mp)
2462 vpe_client_registration_t *clients, client;
2463 stats_main_t *sm = &stats_main;
2464 vl_api_registration_t *reg, *reg_prev = NULL;
2465 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2469 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2472 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2473 ~0 /*flag for all */ );
2475 for (i = 0; i < vec_len (clients); i++)
2477 client = clients[i];
2478 reg = vl_api_client_index_to_registration (client.client_index);
2481 if (reg_prev && vl_api_can_send_msg (reg_prev))
2483 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2484 clib_memcpy (mp_copy, mp, mp_size);
2485 vl_api_send_msg (reg_prev, (u8 *) mp);
2493 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2494 client.client_index);
2501 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2504 if (reg_prev && vl_api_can_send_msg (reg_prev))
2506 vl_api_send_msg (reg_prev, (u8 *) mp);
2510 vl_msg_api_free (mp);

/* Relay VNET_IP4_FIB_COUNTERS to registered clients (pattern above). */
2515 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2517 stats_main_t *sm = &stats_main;
2518 vl_api_registration_t *reg, *reg_prev = NULL;
2519 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2521 vpe_client_registration_t *clients, client;
2524 mp_size = sizeof (*mp_copy) +
2525 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2528 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2530 for (i = 0; i < vec_len (clients); i++)
2532 client = clients[i];
2533 reg = vl_api_client_index_to_registration (client.client_index);
2536 if (reg_prev && vl_api_can_send_msg (reg_prev))
2538 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2539 clib_memcpy (mp_copy, mp, mp_size);
2540 vl_api_send_msg (reg_prev, (u8 *) mp);
2547 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2548 ~0, client.client_index);
2554 if (reg_prev && vl_api_can_send_msg (reg_prev))
2556 vl_api_send_msg (reg_prev, (u8 *) mp);
2560 vl_msg_api_free (mp);

/* Relay VNET_IP4_NBR_COUNTERS to registered clients (pattern above). */
2565 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2567 stats_main_t *sm = &stats_main;
2568 vl_api_registration_t *reg, *reg_prev = NULL;
2569 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2571 vpe_client_registration_t *clients, client;
2574 mp_size = sizeof (*mp_copy) +
2575 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2578 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2580 for (i = 0; i < vec_len (clients); i++)
2582 client = clients[i];
2583 reg = vl_api_client_index_to_registration (client.client_index);
2586 if (reg_prev && vl_api_can_send_msg (reg_prev))
2588 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2589 clib_memcpy (mp_copy, mp, mp_size);
2590 vl_api_send_msg (reg_prev, (u8 *) mp);
2597 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2598 ~0, client.client_index);
2605 if (reg_prev && vl_api_can_send_msg (reg_prev))
2607 vl_api_send_msg (reg_prev, (u8 *) mp);
2611 vl_msg_api_free (mp);

/* Relay VNET_IP6_FIB_COUNTERS to registered clients (pattern above). */
2616 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2618 stats_main_t *sm = &stats_main;
2619 vl_api_registration_t *reg, *reg_prev = NULL;
2620 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2622 vpe_client_registration_t *clients, client;
2625 mp_size = sizeof (*mp_copy) +
2626 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2629 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2631 for (i = 0; i < vec_len (clients); i++)
2633 client = clients[i];
2634 reg = vl_api_client_index_to_registration (client.client_index);
2637 if (reg_prev && vl_api_can_send_msg (reg_prev))
2639 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2640 clib_memcpy (mp_copy, mp, mp_size);
2641 vl_api_send_msg (reg_prev, (u8 *) mp);
2648 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2649 ~0, client.client_index);
2654 if (reg_prev && vl_api_can_send_msg (reg_prev))
2656 vl_api_send_msg (reg_prev, (u8 *) mp);
2660 vl_msg_api_free (mp);

/* Relay VNET_IP6_NBR_COUNTERS to registered clients (pattern above). */
2667 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2669 stats_main_t *sm = &stats_main;
2670 vl_api_registration_t *reg, *reg_prev = NULL;
2671 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2673 vpe_client_registration_t *clients, client;
2676 mp_size = sizeof (*mp_copy) +
2677 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2680 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2682 for (i = 0; i < vec_len (clients); i++)
2684 client = clients[i];
2685 reg = vl_api_client_index_to_registration (client.client_index);
2688 if (reg_prev && vl_api_can_send_msg (reg_prev))
2690 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2691 clib_memcpy (mp_copy, mp, mp_size);
2692 vl_api_send_msg (reg_prev, (u8 *) mp);
2699 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2700 ~0, client.client_index);
2707 if (reg_prev && vl_api_can_send_msg (reg_prev))
2709 vl_api_send_msg (reg_prev, (u8 *) mp);
2713 vl_msg_api_free (mp);
2718 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2720 stats_main_t *sm = &stats_main;
2721 vpe_client_registration_t rp;
2722 vl_api_want_udp_encap_stats_reply_t *rmp;
2725 vl_api_registration_t *reg;
2728 fib = ~0; //Using same mechanism as _per_interface_
2729 rp.client_index = mp->client_index;
2730 rp.client_pid = mp->pid;
2732 handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2735 reg = vl_api_client_index_to_registration (mp->client_index);
2739 sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2740 fib, mp->client_index);
2744 rmp = vl_msg_api_alloc (sizeof (*rmp));
2745 rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2746 rmp->context = mp->context;
2747 rmp->retval = retval;
2749 vl_api_send_msg (reg, (u8 *) rmp);
2753 vl_api_want_bier_neighbor_stats_t_handler (vl_api_want_bier_neighbor_stats_t *
2756 stats_main_t *sm = &stats_main;
2757 vpe_client_registration_t rp;
2758 vl_api_want_bier_neighbor_stats_reply_t *rmp;
2761 vl_api_registration_t *reg;
2764 fib = ~0; //Using same mechanism as _per_interface_
2765 rp.client_index = mp->client_index;
2766 rp.client_pid = mp->pid;
2768 handle_client_registration (&rp, IDX_BIER_NEIGHBOR_COUNTERS, fib,
2772 reg = vl_api_client_index_to_registration (mp->client_index);
2776 sm->enable_poller = clear_client_for_stat (IDX_BIER_NEIGHBOR_COUNTERS,
2777 fib, mp->client_index);
2781 rmp = vl_msg_api_alloc (sizeof (*rmp));
2782 rmp->_vl_msg_id = ntohs (VL_API_WANT_BIER_NEIGHBOR_STATS_REPLY);
2783 rmp->context = mp->context;
2784 rmp->retval = retval;
2786 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Legacy WANT_STATS handler: a single enable/disable toggles the client's
 * registration for ALL six classic counter groups (per-interface simple and
 * combined, IP4/IP6 FIB, IP4/IP6 neighbor) at once, keyed by the wildcard
 * item ~0. Replies with a bare retval message.
 * NOTE(review): extraction dropped interleaved lines (braces, retval/item
 * declarations, the if (!reg) early return).
 */
2790 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2792 stats_main_t *sm = &stats_main;
2793 vpe_client_registration_t rp;
2794 vl_api_want_stats_reply_t *rmp;
2798 vl_api_registration_t *reg;
/* ~0 = wildcard: register for every item in each counter group. */
2800 item = ~0; //"ALL THE THINGS IN THE THINGS
2801 rp.client_index = mp->client_index;
2802 rp.client_pid = mp->pid;
2804 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2805 item, mp->enable_disable);
2807 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2808 item, mp->enable_disable);
2810 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2811 item, mp->enable_disable);
2813 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2814 item, mp->enable_disable);
2816 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2817 item, mp->enable_disable);
2819 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2820 item, mp->enable_disable);
2823 reg = vl_api_client_index_to_registration (mp->client_index);
2827 rmp = vl_msg_api_alloc (sizeof (*rmp));
2828 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2829 rmp->context = mp->context;
2830 rmp->retval = retval;
2832 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * WANT_INTERFACE_SIMPLE_STATS handler: (de)registers the client for simple
 * (packet-count) interface counters across all interfaces (swif = ~0
 * wildcard), then replies with retval.
 * NOTE(review): interleaved lines (braces, retval/swif declarations, the
 * if (!reg) guard) are missing from this extraction.
 */
2836 vl_api_want_interface_simple_stats_t_handler
2837 (vl_api_want_interface_simple_stats_t * mp)
2839 stats_main_t *sm = &stats_main;
2840 vpe_client_registration_t rp;
2841 vl_api_want_interface_simple_stats_reply_t *rmp;
2845 vl_api_registration_t *reg;
2847 swif = ~0; //Using same mechanism as _per_interface_
2848 rp.client_index = mp->client_index;
2849 rp.client_pid = mp->pid;
2851 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2852 mp->enable_disable);
2855 reg = vl_api_client_index_to_registration (mp->client_index);
/* Dead client: remove its registration for this counter group. */
2860 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2865 rmp = vl_msg_api_alloc (sizeof (*rmp));
2866 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2867 rmp->context = mp->context;
2868 rmp->retval = retval;
2870 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * WANT_IP4_FIB_STATS handler: (de)registers the client for IP4 FIB counters
 * (fib = ~0 wildcard covers all FIBs) and sends a retval reply.
 * NOTE(review): braces, retval/fib declarations and the if (!reg) guard are
 * on lines missing from this extraction.
 */
2875 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2877 stats_main_t *sm = &stats_main;
2878 vpe_client_registration_t rp;
2879 vl_api_want_ip4_fib_stats_reply_t *rmp;
2882 vl_api_registration_t *reg;
2885 fib = ~0; //Using same mechanism as _per_interface_
2886 rp.client_index = mp->client_index;
2887 rp.client_pid = mp->pid;
2889 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2890 mp->enable_disable);
2893 reg = vl_api_client_index_to_registration (mp->client_index);
/* Dead client: clear registration and recompute poller enable state. */
2897 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2898 fib, mp->client_index);
2902 rmp = vl_msg_api_alloc (sizeof (*rmp));
2903 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2904 rmp->context = mp->context;
2905 rmp->retval = retval;
2907 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * WANT_IP4_MFIB_STATS handler: (de)registers the client for IP4 multicast
 * FIB counters (mfib = ~0 wildcard) and sends a retval reply.
 * NOTE(review): braces, retval/mfib declarations and the if (!reg) guard
 * are on lines missing from this extraction.
 */
2911 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2913 stats_main_t *sm = &stats_main;
2914 vpe_client_registration_t rp;
2915 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2918 vl_api_registration_t *reg;
2921 mfib = ~0; //Using same mechanism as _per_interface_
2922 rp.client_index = mp->client_index;
2923 rp.client_pid = mp->pid;
2925 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2926 mp->enable_disable);
2929 reg = vl_api_client_index_to_registration (mp->client_index);
/* Dead client: clear registration and recompute poller enable state. */
2932 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2933 mfib, mp->client_index);
2937 rmp = vl_msg_api_alloc (sizeof (*rmp));
2938 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2939 rmp->context = mp->context;
2940 rmp->retval = retval;
2942 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * WANT_IP6_FIB_STATS handler: (de)registers the client for IP6 FIB counters
 * (fib = ~0 wildcard) and sends a retval reply.
 * FIX(review): the reply pointer was declared as the IP4 reply type
 * (vl_api_want_ip4_fib_stats_reply_t) - a copy-paste from the v4 handler.
 * It only worked because both reply structs share the same layout; declare
 * the correct IP6 type to match VL_API_WANT_IP6_FIB_STATS_REPLY.
 * NOTE(review): braces, retval/fib declarations and the if (!reg) guard are
 * on lines missing from this extraction.
 */
2946 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2948 stats_main_t *sm = &stats_main;
2949 vpe_client_registration_t rp;
2950 vl_api_want_ip6_fib_stats_reply_t *rmp;
2953 vl_api_registration_t *reg;
2956 fib = ~0; //Using same mechanism as _per_interface_
2957 rp.client_index = mp->client_index;
2958 rp.client_pid = mp->pid;
2960 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2961 mp->enable_disable);
2964 reg = vl_api_client_index_to_registration (mp->client_index);
/* Dead client: clear registration and recompute poller enable state. */
2967 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2968 fib, mp->client_index);
2972 rmp = vl_msg_api_alloc (sizeof (*rmp));
2973 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2974 rmp->context = mp->context;
2975 rmp->retval = retval;
2977 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * WANT_IP6_MFIB_STATS handler: (de)registers the client for IP6 multicast
 * FIB counters (mfib = ~0 wildcard) and sends a retval reply.
 * FIX(review): the reply pointer was declared as the IP4 mfib reply type
 * (vl_api_want_ip4_mfib_stats_reply_t) - a copy-paste from the v4 handler.
 * Layout-identical so it happened to work; declare the correct IP6 type to
 * match VL_API_WANT_IP6_MFIB_STATS_REPLY.
 * NOTE(review): braces, retval/mfib declarations and the if (!reg) guard
 * are on lines missing from this extraction.
 */
2981 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2983 stats_main_t *sm = &stats_main;
2984 vpe_client_registration_t rp;
2985 vl_api_want_ip6_mfib_stats_reply_t *rmp;
2988 vl_api_registration_t *reg;
2991 mfib = ~0; //Using same mechanism as _per_interface_
2992 rp.client_index = mp->client_index;
2993 rp.client_pid = mp->pid;
2995 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2996 mp->enable_disable);
2999 reg = vl_api_client_index_to_registration (mp->client_index);
/* Dead client: clear registration and recompute poller enable state. */
3002 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
3003 mfib, mp->client_index);
3007 rmp = vl_msg_api_alloc (sizeof (*rmp));
3008 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
3009 rmp->context = mp->context;
3010 rmp->retval = retval;
3012 vl_api_send_msg (reg, (u8 *) rmp);
3015 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/* Stub: neighbor-stats registration intentionally disabled (see FIXME).
 * Bodies are on lines missing from this extraction - presumably empty or
 * no-op; confirm against the full source. */
3017 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
3022 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/*
 * VNET_GET_SUMMARY_STATS handler: sums every interface's combined (packets +
 * bytes) counters per counter type under the interface-counter lock, then
 * replies with per-type totals (network byte order) plus the last vector
 * length per node.
 * NOTE(review): braces and some declarations (i, v, which, rmp->vector_rate
 * assignment target) are on lines missing from this extraction.
 */
3027 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
3029 stats_main_t *sm = &stats_main;
3030 vnet_interface_main_t *im = sm->interface_main;
3031 vl_api_vnet_get_summary_stats_reply_t *rmp;
3032 vlib_combined_counter_main_t *cm;
3034 vnet_interface_counter_type_t ct;
3036 u64 total_pkts[VNET_N_COMBINED_INTERFACE_COUNTER];
3037 u64 total_bytes[VNET_N_COMBINED_INTERFACE_COUNTER];
3038 vl_api_registration_t *reg;
3040 reg = vl_api_client_index_to_registration (mp->client_index);
3044 rmp = vl_msg_api_alloc (sizeof (*rmp));
3045 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
3046 rmp->context = mp->context;
/* Zero the accumulators before summing across all interfaces. */
3049 clib_memset (total_pkts, 0, sizeof (total_pkts));
3050 clib_memset (total_bytes, 0, sizeof (total_bytes));
/* Lock out concurrent counter updates while reading. */
3052 vnet_interface_counter_lock (im);
3054 vec_foreach (cm, im->combined_sw_if_counters)
/* 'which' = counter type index (rx, tx, ...) derived from vector offset. */
3056 which = cm - im->combined_sw_if_counters;
3058 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
3060 vlib_get_combined_counter (cm, i, &v);
3061 total_pkts[which] += v.packets;
3062 total_bytes[which] += v.bytes;
3065 vnet_interface_counter_unlock (im);
/* Marshal totals into the reply in network byte order. */
3067 foreach_rx_combined_interface_counter (ct)
3069 rmp->total_pkts[ct] = clib_host_to_net_u64 (total_pkts[ct]);
3070 rmp->total_bytes[ct] = clib_host_to_net_u64 (total_bytes[ct]);
3073 foreach_tx_combined_interface_counter (ct)
3075 rmp->total_pkts[ct] = clib_host_to_net_u64 (total_pkts[ct]);
3076 rmp->total_bytes[ct] = clib_host_to_net_u64 (total_bytes[ct]);
/* Assignment target (rmp->vector_rate) is on a line missing here. */
3079 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
3081 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Memory-API client-delete callback. The per-client cleanup is entirely
 * commented out (the registration model moved to per-stat pools), so this
 * is currently a no-op hook kept for the callback signature.
 */
3085 stats_memclnt_delete_callback (u32 client_index)
3087 vpe_client_stats_registration_t *rp;
3088 stats_main_t *sm = &stats_main;
3092 /* p = hash_get (sm->stats_registration_hash, client_index); */
3095 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
3096 /* pool_put (sm->stats_registrations, rp); */
3097 /* hash_unset (sm->stats_registration_hash, client_index); */
/* Counter broadcast messages are raw binary blobs produced by the stats
 * thread: suppress the generated endian-swap and print handlers for them
 * (the data is already in wire format and not meaningfully printable). */
3103 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
3104 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
3105 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
3106 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
3107 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
3108 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
3109 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
3110 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
3111 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
3112 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
3113 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
3114 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/*
 * One-time init for the stats module: caches vnet/interface mains, sets the
 * poll interval, allocates the data-structure lock, registers all stats API
 * message handlers, marks counter messages non-freeable (the stats thread
 * owns and reuses them), builds the message-id table, and zeroes the
 * per-stat registration pools/hashes from stats.reg.
 * NOTE(review): the foreach_stats_msg invocation and closing lines are on
 * lines missing from this extraction.
 */
3116 static clib_error_t *
3117 stats_init (vlib_main_t * vm)
3119 stats_main_t *sm = &stats_main;
3120 api_main_t *am = &api_main;
3121 void *vlib_worker_thread_bootstrap_fn (void *arg);
3124 sm->vnet_main = vnet_get_main ();
3125 sm->interface_main = &vnet_get_main ()->interface_main;
3127 sm->stats_poll_interval_in_seconds = 10;
/* Cache-line aligned lock shared with the stats poller thread. */
3128 sm->data_structure_lock =
3129 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
3130 CLIB_CACHE_LINE_BYTES);
3131 clib_memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Handler-registration macro body, expanded via foreach_stats_msg. */
3134 vl_msg_api_set_handlers(VL_API_##N, #n, \
3135 vl_api_##n##_t_handler, \
3137 vl_api_##n##_t_endian, \
3138 vl_api_##n##_t_print, \
3139 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
3143 /* tell the msg infra not to free these messages... */
3144 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
3145 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
3146 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
3147 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
3148 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
3149 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
3152 * Set up the (msg_name, crc, message-id) table
3154 setup_message_id_table (am);
/* One registration pool + hash slot per stat index in stats.reg. */
3156 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
3157 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
3158 #define stats_reg(n) \
3159 sm->stats_registrations[IDX_##n] = 0; \
3160 sm->stats_registration_hash[IDX_##n] = 0;
3161 #include <vpp/stats/stats.reg>
3167 VLIB_INIT_FUNCTION (stats_init);
/* Register the dedicated stats poller thread; no_data_structure_clone keeps
 * it sharing the main heap/data structures rather than getting per-thread
 * clones. Remaining initializers (.name, closing brace) are on lines missing
 * from this extraction. */
3170 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
3172 .function = stats_thread_fn,
3175 .no_data_structure_clone = 1,
3181 * fd.io coding-style-patch-verification: ON
3184 * eval: (c-set-style "gnu")