2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
22 #include <vnet/bier/bier_fmask.h>
23 #include <vnet/bier/bier_table.h>
24 #include <vnet/fib/fib_api.h>
28 stats_main_t stats_main;
30 #include <vnet/ip/ip.h>
32 #include <vpp/api/vpe_msg_enum.h>
35 #define f64_print(a,b)
37 #define vl_typedefs /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
41 #define vl_endianfun /* define message structures */
42 #include <vpp/api/vpe_all_api_h.h>
45 /* instantiate all the print functions we know about */
46 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
48 #include <vpp/api/vpe_all_api_h.h>
/* X-macro list of every stats API message this module handles.
 * Each entry is _(MESSAGE_ID_SUFFIX, handler_name_suffix); the list is
 * expanded elsewhere to register message handlers and sizes. */
51 #define foreach_stats_msg \
52 _(WANT_STATS, want_stats) \
53 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
54 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
55 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
56 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
57 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
58 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
59 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
60 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
61 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
62 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
63 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
64 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
65 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
66 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
67 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
68 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
69 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
70 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
71 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats) \
72 _(WANT_BIER_NEIGHBOR_STATS, want_bier_neighbor_stats)
/* Register the name+CRC of each stats API message with the API main
 * structure, so clients can resolve message IDs by name at runtime.
 * NOTE(review): this extract is missing lines (return type, braces,
 * the per-message macro definition) — confirm against the full file. */
74 #define vl_msg_name_crc_list
75 #include <vpp/stats/stats.api.h>
76 #undef vl_msg_name_crc_list
79 setup_message_id_table (api_main_t * am)
82 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
83 foreach_vl_msg_name_crc_stats;
87 /* These constants ensure msg sizes <= 1024, aka ring allocation */
88 #define SIMPLE_COUNTER_BATCH_SIZE 126
89 #define COMBINED_COUNTER_BATCH_SIZE 63
90 #define IP4_FIB_COUNTER_BATCH_SIZE 48
91 #define IP6_FIB_COUNTER_BATCH_SIZE 30
92 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
93 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
94 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
95 #define BIER_NEIGHBOR_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_bier_neighbor_counter_t))
/* Pause length used when the shared-memory ring fills up: 5 ms in ns. */
98 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* vppinfra format function: render a combined-counters API message as
 * human-readable "<if-name>.<counter>.packets/bytes <value>" lines.
 * Counter values arrive in network byte order and possibly unaligned,
 * hence the clib_mem_unaligned + clib_net_to_host_u64 pairs. */
102 format_vnet_interface_combined_counters (u8 * s, va_list * args)
104 stats_main_t *sm = &stats_main;
105 vl_api_vnet_interface_combined_counters_t *mp =
106 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
109 u32 count, sw_if_index;
111 count = ntohl (mp->count);
112 sw_if_index = ntohl (mp->first_sw_if_index);
116 vp = (vlib_counter_t *) mp->data;
/* Map the wire counter-type code to a printable name; "bogus" catches
 * anything unexpected rather than crashing the formatter. */
118 switch (mp->vnet_counter_type)
120 case VNET_INTERFACE_COUNTER_RX:
123 case VNET_INTERFACE_COUNTER_TX:
127 counter_name = "bogus";
130 for (i = 0; i < count; i++)
132 packets = clib_mem_unaligned (&vp->packets, u64);
133 packets = clib_net_to_host_u64 (packets);
134 bytes = clib_mem_unaligned (&vp->bytes, u64);
135 bytes = clib_net_to_host_u64 (bytes);
137 s = format (s, "%U.%s.packets %lld\n",
138 format_vnet_sw_if_index_name,
139 sm->vnet_main, sw_if_index, counter_name, packets);
140 s = format (s, "%U.%s.bytes %lld\n",
141 format_vnet_sw_if_index_name,
142 sm->vnet_main, sw_if_index, counter_name, bytes);
/* vppinfra format function: render a simple-counters API message as
 * "<if-name>.<counter> <value>" lines, one per interface starting at
 * first_sw_if_index. Values are u64 in network byte order. */
149 format_vnet_interface_simple_counters (u8 * s, va_list * args)
151 stats_main_t *sm = &stats_main;
152 vl_api_vnet_interface_simple_counters_t *mp =
153 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
155 u32 count, sw_if_index;
156 count = ntohl (mp->count);
157 sw_if_index = ntohl (mp->first_sw_if_index);
159 vp = (u64 *) mp->data;
/* Translate the wire counter-type code into a printable label. */
162 switch (mp->vnet_counter_type)
164 case VNET_INTERFACE_COUNTER_DROP:
165 counter_name = "drop";
167 case VNET_INTERFACE_COUNTER_PUNT:
168 counter_name = "punt";
170 case VNET_INTERFACE_COUNTER_IP4:
171 counter_name = "ip4";
173 case VNET_INTERFACE_COUNTER_IP6:
174 counter_name = "ip6";
176 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
177 counter_name = "rx-no-buff";
179 case VNET_INTERFACE_COUNTER_RX_MISS:
180 counter_name = "rx-miss";
182 case VNET_INTERFACE_COUNTER_RX_ERROR:
183 counter_name = "rx-error (fifo-full)";
185 case VNET_INTERFACE_COUNTER_TX_ERROR:
186 counter_name = "tx-error (fifo-full)";
/* Fallback label for unknown counter types. */
189 counter_name = "bogus";
192 for (i = 0; i < count; i++)
194 v = clib_mem_unaligned (vp, u64);
195 v = clib_net_to_host_u64 (v);
197 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
198 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the stats data-structure spin lock. Re-entrant on the same
 * thread: if this thread already holds the lock, the early check on
 * l->thread_index lets it proceed (recursion count handling is in lines
 * elided from this extract). Spins on __sync_lock_test_and_set. */
206 dslock (stats_main_t * sm, int release_hint, int tag)
209 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock not yet initialised — nothing to take. */
211 if (PREDICT_FALSE (l == 0))
214 thread_index = vlib_get_thread_index ();
/* Already held by this thread: recursive acquisition. */
215 if (l->lock && l->thread_index == thread_index)
224 while (__sync_lock_test_and_set (&l->lock, 1))
227 l->thread_index = thread_index;
/* Public wrapper around dslock(); hint/tag are diagnostic parameters
 * forwarded unchanged. */
232 stats_dslock_with_hint (int hint, int tag)
234 stats_main_t *sm = &stats_main;
235 dslock (sm, hint, tag);
/* Release the stats data-structure lock. Asserts the caller actually
 * holds it (lock set, owner thread matches). The memory barrier ensures
 * protected writes are visible before the lock word is cleared
 * (the clearing store itself is in a line elided from this extract). */
239 dsunlock (stats_main_t * sm)
242 data_structure_lock_t *l = sm->data_structure_lock;
244 if (PREDICT_FALSE (l == 0))
247 thread_index = vlib_get_thread_index ();
248 ASSERT (l->lock && l->thread_index == thread_index);
254 CLIB_MEMORY_BARRIER ();
/* Public wrapper around dsunlock(); hint/tag are accepted for symmetry
 * with stats_dslock_with_hint but the forwarding call is in a line
 * elided from this extract. */
260 stats_dsunlock (int hint, int tag)
262 stats_main_t *sm = &stats_main;
/* Look up a single client's registration for (stat category `reg`,
 * item) — a two-level lookup: item -> registration pool index, then
 * client_index -> client pool index. Returns a pointer into the clients
 * pool, or NULL (in elided lines) when either lookup misses. */
266 static vpe_client_registration_t *
267 get_client_for_stat (u32 reg, u32 item, u32 client_index)
269 stats_main_t *sm = &stats_main;
270 vpe_client_stats_registration_t *registration;
273 /* Is there anything listening for item in that reg */
274 p = hash_get (sm->stats_registration_hash[reg], item);
279 /* If there is, is our client_index one of them */
280 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
281 p = hash_get (registration->client_hash, client_index);
286 return pool_elt_at_index (registration->clients, p[0]);
/* Register `client` as a listener for (stat category `reg`, item),
 * creating the per-item registration record on first use. Returns 1 to
 * signal the poller that at least one client is now listening. */
291 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
293 stats_main_t *sm = &stats_main;
294 vpe_client_stats_registration_t *registration;
295 vpe_client_registration_t *cr;
298 /* Is there anything listening for item in that reg */
299 p = hash_get (sm->stats_registration_hash[reg], item);
/* First listener for this item: allocate and index a fresh record. */
303 pool_get (sm->stats_registrations[reg], registration);
304 registration->item = item;
305 registration->client_hash = NULL;
306 registration->clients = NULL;
307 hash_set (sm->stats_registration_hash[reg], item,
308 registration - sm->stats_registrations[reg]);
312 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* Add the client to the record unless it is already present. */
315 p = hash_get (registration->client_hash, client->client_index);
319 pool_get (registration->clients, cr);
320 cr->client_index = client->client_index;
321 cr->client_pid = client->client_pid;
322 hash_set (registration->client_hash, cr->client_index,
323 cr - registration->clients);
326 return 1; //At least one client is doing something ... poll
/* Remove one client's registration for (reg, item); reg_index is the
 * pool index of the per-item registration record. If that client was
 * the last listener for the item, tear the whole record down. */
330 clear_one_client (u32 reg_index, u32 reg, u32 item, u32 client_index)
332 stats_main_t *sm = &stats_main;
333 vpe_client_stats_registration_t *registration;
334 vpe_client_registration_t *client;
337 registration = pool_elt_at_index (sm->stats_registrations[reg], reg_index);
338 p = hash_get (registration->client_hash, client_index);
342 client = pool_elt_at_index (registration->clients, p[0]);
343 hash_unset (registration->client_hash, client->client_index);
344 pool_put (registration->clients, client);
346 /* Now check if that was the last client for that item */
347 if (0 == pool_elts (registration->clients))
349 hash_unset (sm->stats_registration_hash[reg], item);
350 hash_free (registration->client_hash);
351 pool_free (registration->clients);
352 pool_put (sm->stats_registrations[reg], registration);
/* Unregister a client from (reg, item), then recount registrations
 * across all stat categories — the total (returned in an elided line)
 * tells the caller whether the poller still has work to do. */
358 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
360 stats_main_t *sm = &stats_main;
364 /* Clear the client first */
365 /* Is there anything listening for item in that reg */
366 p = hash_get (sm->stats_registration_hash[reg], item);
371 /* If there is, is our client_index one of them */
372 clear_one_client (p[0], reg, item, client_index);
376 /* Now check if that was the last item in any of the listened to stats */
377 for (i = 0; i < STATS_REG_N_IDX; i++)
379 elts += pool_elts (sm->stats_registrations[i]);
/* Unregister a (typically disconnected) client from every stat it was
 * listening to, across all categories and items, then recount the
 * remaining registrations for the poller-enable decision. */
385 clear_client_for_all_stats (u32 client_index)
387 stats_main_t *sm = &stats_main;
388 u32 reg_index, item, reg;
392 vec_foreach_index(reg, sm->stats_registration_hash)
394 hash_foreach(item, reg_index, sm->stats_registration_hash[reg],
396 clear_one_client(reg_index, reg, item, client_index);
402 /* Now check if that was the last item in any of the listened to stats */
403 for (i = 0; i < STATS_REG_N_IDX; i++)
405 elts += pool_elts (sm->stats_registrations[i]);
/* API client reaper: called when a client disconnects. Drops all of the
 * client's stat registrations and disables the poller if nobody is
 * left listening. Registered via VL_MSG_API_REAPER_FUNCTION below. */
410 static clib_error_t *
411 want_stats_reaper (u32 client_index)
413 stats_main_t *sm = &stats_main;
415 sm->enable_poller = clear_client_for_all_stats (client_index);
420 VL_MSG_API_REAPER_FUNCTION (want_stats_reaper);
424 * Return a copy of the clients list.
/* Builds and returns a vector *copy* of all clients registered for
 * (reg, item); the caller owns (and must free) the returned vector.
 * Copying lets callers iterate without holding the registration lock. */
426 vpe_client_registration_t *
427 get_clients_for_stat (u32 reg, u32 item)
429 stats_main_t *sm = &stats_main;
430 vpe_client_registration_t *client, *clients = 0;
431 vpe_client_stats_registration_t *registration;
434 /* Is there anything listening for item in that reg */
435 p = hash_get (sm->stats_registration_hash[reg], item);
440 /* If there is, is our client_index one of them */
441 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
443 vec_reset_length (clients);
/* Snapshot every registered client into the result vector. */
446 pool_foreach (client, registration->clients,
448 vec_add1 (clients, *client);}
/* Free the per-stat-type registration index vectors (one per entry in
 * stats.reg) and then the containing vector itself. */
456 clear_client_reg (u32 ** registrations)
458 /* When registrations[x] is a vector of pool indices
459 here is a good place to clean up the pools
461 #define stats_reg(n) vec_free(registrations[IDX_##n]);
462 #include <vpp/stats/stats.reg>
465 vec_free (registrations);
/* Allocate and reset one "interested indexes" vector per stat type
 * defined in stats.reg; returns the (possibly reallocated) vector of
 * vectors, which the caller must keep. */
469 init_client_reg (u32 ** registrations)
473 Initialise the stats registrations for each
474 type of stat a client can register for as well as
475 a vector of "interested" indexes.
476 Initially this is a u32 of either sw_if_index or fib_index
477 but eventually this should migrate to a pool_index (u32)
478 with a type specific pool that can include more complex things
479 such as timing and structured events.
481 vec_validate (registrations, STATS_REG_N_IDX);
482 #define stats_reg(n) \
483 vec_reset_length(registrations[IDX_##n]);
484 #include <vpp/stats/stats.reg>
488 When registrations[x] is a vector of pool indices, here
489 is a good place to init the pools.
491 return registrations;
/* Mark every stat type as wanted by appending the ~0 "all items"
 * sentinel to each per-type index vector; returns the updated vector. */
495 enable_all_client_reg (u32 ** registrations)
499 Enable all stats known by adding
500 ~0 to the index vector. Eventually this
501 should be deprecated.
503 #define stats_reg(n) \
504 vec_add1(registrations[IDX_##n], ~0);
505 #include <vpp/stats/stats.reg>
507 return registrations;
/* Collect simple (single-u64) counters for ALL interfaces and push them
 * to the main thread in batches of SIMPLE_COUNTER_BATCH_SIZE via the
 * shared-memory API queue. Values are sent in network byte order. */
511 do_simple_interface_counters (stats_main_t * sm)
513 vl_api_vnet_interface_simple_counters_t *mp = 0;
514 vnet_interface_main_t *im = sm->interface_main;
515 api_main_t *am = sm->api_main;
516 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
517 svm_queue_t *q = shmem_hdr->vl_input_queue;
518 vlib_simple_counter_main_t *cm;
519 u32 items_this_message = 0;
524 * Prevent interface registration from expanding / moving the vectors...
525 * That tends never to happen, so we can hold this lock for a while.
527 vnet_interface_counter_lock (im);
/* One pass per counter type (drop, punt, ip4, ...). */
529 vec_foreach (cm, im->sw_if_counters)
531 n_counts = vlib_simple_counter_n_counters (cm);
532 for (i = 0; i < n_counts; i++)
/* Start a new message when none is pending (allocation path). */
536 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
539 mp = vl_msg_api_alloc_as_if_client
540 (sizeof (*mp) + items_this_message * sizeof (v));
541 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
/* Counter type is the index of cm within the counters vector. */
542 mp->vnet_counter_type = cm - im->sw_if_counters;
543 mp->first_sw_if_index = htonl (i);
545 vp = (u64 *) mp->data;
547 v = vlib_get_simple_counter (cm, i);
548 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
/* Batch full: finalise the count field and ship the message. */
551 if (mp->count == items_this_message)
553 mp->count = htonl (items_this_message);
554 /* Send to the main thread... */
555 vl_msg_api_send_shmem (q, (u8 *) & mp);
561 vnet_interface_counter_unlock (im);
/* Enable or disable one client's registration for (stat, item).
 * Disable of a non-registered client only logs a warning; enable of a
 * new client allocates a registration and may turn the poller on. */
565 handle_client_registration (vpe_client_registration_t * client, u32 stat,
566 u32 item, int enable_disable)
568 stats_main_t *sm = &stats_main;
569 vpe_client_registration_t *rp, _rp;
571 rp = get_client_for_stat (stat, item, client->client_index);
/* Disable path. */
574 if (enable_disable == 0)
576 if (!rp) // No client to disable
578 clib_warning ("pid %d: already disabled for stats...",
583 clear_client_for_stat (stat, item, client->client_index);
/* Enable path: register and (re)enable the poller. */
590 rp->client_index = client->client_index;
591 rp->client_pid = client->client_pid;
592 sm->enable_poller = set_client_for_stat (stat, item, rp);
597 /**********************************
598 * ALL Interface Combined stats - to be deprecated
599 **********************************/
602 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/* Handler for WANT_INTERFACE_COMBINED_STATS: registers the caller for
 * combined counters on all interfaces (implemented as a per-interface
 * registration with the ~0 wildcard), then sends a reply message. */
605 vl_api_want_interface_combined_stats_t_handler
606 (vl_api_want_interface_combined_stats_t * mp)
608 stats_main_t *sm = &stats_main;
609 vpe_client_registration_t rp;
610 vl_api_want_interface_combined_stats_reply_t *rmp;
613 vl_api_registration_t *reg;
616 swif = ~0; //Using same mechanism as _per_interface_
617 rp.client_index = mp->client_index;
618 rp.client_pid = mp->pid;
620 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
/* If the client vanished before we could reply, undo the registration
 * so the poller does not service a dead client. */
624 reg = vl_api_client_index_to_registration (mp->client_index);
628 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
633 rmp = vl_msg_api_alloc (sizeof (*rmp));
634 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
635 rmp->context = mp->context;
636 rmp->retval = retval;
638 vl_api_send_msg (reg, (u8 *) rmp);
/* Main-thread handler for an inbound combined-counters message: fan it
 * out to every client registered for the ~0 "all interfaces" item.
 * To send to N clients the message is copied N-1 times and the original
 * buffer is reused for the final client; if no client can receive it,
 * the message is freed. */
642 vl_api_vnet_interface_combined_counters_t_handler
643 (vl_api_vnet_interface_combined_counters_t * mp)
645 vpe_client_registration_t *clients, client;
646 stats_main_t *sm = &stats_main;
647 vl_api_registration_t *reg, *reg_prev = NULL;
648 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
/* Total wire size: header plus `count` vlib_counter_t entries. */
652 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
655 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
656 ~0 /*flag for all */ );
658 for (i = 0; i < vec_len (clients); i++)
661 reg = vl_api_client_index_to_registration (client.client_index);
/* Ship the previous client's copy before making a new one. */
664 if (reg_prev && vl_api_can_send_msg (reg_prev))
666 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
667 clib_memcpy (mp_copy, mp, mp_size);
668 vl_api_send_msg (reg_prev, (u8 *) mp);
676 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
/* Last registered client gets the original buffer (no extra copy). */
679 if (reg_prev && vl_api_can_send_msg (reg_prev))
681 vl_api_send_msg (reg_prev, (u8 *) mp);
685 vl_msg_api_free (mp);
/* Collect combined (packets+bytes) counters for ALL interfaces and
 * push them to the main thread in COMBINED_COUNTER_BATCH_SIZE batches
 * over the shared-memory API queue; values in network byte order. */
690 do_combined_interface_counters (stats_main_t * sm)
692 vl_api_vnet_interface_combined_counters_t *mp = 0;
693 vnet_interface_main_t *im = sm->interface_main;
694 api_main_t *am = sm->api_main;
695 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
696 svm_queue_t *q = shmem_hdr->vl_input_queue;
697 vlib_combined_counter_main_t *cm;
698 u32 items_this_message = 0;
699 vlib_counter_t v, *vp = 0;
/* Hold the counter lock so interface registration cannot move the
 * counter vectors while we walk them. */
702 vnet_interface_counter_lock (im);
704 vec_foreach (cm, im->combined_sw_if_counters)
706 n_counts = vlib_combined_counter_n_counters (cm);
707 for (i = 0; i < n_counts; i++)
/* Start a new batch message when none is pending. */
711 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
714 mp = vl_msg_api_alloc_as_if_client
715 (sizeof (*mp) + items_this_message * sizeof (v));
716 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
717 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
718 mp->first_sw_if_index = htonl (i);
720 vp = (vlib_counter_t *) mp->data;
722 vlib_get_combined_counter (cm, i, &v);
723 clib_mem_unaligned (&vp->packets, u64)
724 = clib_host_to_net_u64 (v.packets);
725 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
/* Batch full: finalise the count field and ship the message. */
728 if (mp->count == items_this_message)
730 mp->count = htonl (items_this_message);
731 /* Send to the main thread... */
732 vl_msg_api_send_shmem (q, (u8 *) & mp);
738 vnet_interface_counter_unlock (im);
741 /**********************************
742 * Per Interface Combined stats
743 **********************************/
745 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_COMBINED_STATS: validates each
 * requested sw_if_index, records (or clears) a registration per index,
 * and replies with retval. If the client disconnected before the reply
 * could be sent, all its registrations from this request are undone. */
747 vl_api_want_per_interface_combined_stats_t_handler
748 (vl_api_want_per_interface_combined_stats_t * mp)
750 stats_main_t *sm = &stats_main;
751 vpe_client_registration_t rp;
752 vl_api_want_per_interface_combined_stats_reply_t *rmp;
753 vlib_combined_counter_main_t *cm;
756 vl_api_registration_t *reg;
757 u32 i, swif, num = 0;
759 num = ntohl (mp->num);
762 * Validate sw_if_indexes before registering
764 for (i = 0; i < num; i++)
766 swif = ntohl (mp->sw_ifs[i]);
769 * Check its a real sw_if_index that the client is allowed to see
773 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
775 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indexes valid: apply the enable/disable for each one. */
781 for (i = 0; i < num; i++)
783 swif = ntohl (mp->sw_ifs[i]);
785 rp.client_index = mp->client_index;
786 rp.client_pid = mp->pid;
787 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
788 swif, ntohl (mp->enable_disable));
/* Client gone before reply: roll back the registrations just made. */
792 reg = vl_api_client_index_to_registration (mp->client_index);
795 for (i = 0; i < num; i++)
797 swif = ntohl (mp->sw_ifs[i]);
800 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
806 rmp = vl_msg_api_alloc (sizeof (*rmp));
807 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
808 rmp->context = mp->context;
809 rmp->retval = retval;
811 vl_api_send_msg (reg, (u8 *) rmp);
814 /* Per Interface Combined distribution to client */
/* Poller-side distribution of combined counters for per-interface
 * registrations: snapshot the registration and client lists into temp
 * vectors (safe to iterate while mutating the pools), then send each
 * client a single-entry message per registered interface. A ~0 item
 * delegates to the all-interfaces do_combined_interface_counters(). */
816 do_combined_per_interface_counters (stats_main_t * sm)
818 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
819 vnet_interface_main_t *im = sm->interface_main;
820 api_main_t *am = sm->api_main;
821 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
822 vl_api_registration_t *vl_reg;
823 vlib_combined_counter_main_t *cm;
824 vl_api_vnet_combined_counter_t *vp = 0;
827 vpe_client_stats_registration_t *reg;
828 vpe_client_registration_t *client;
829 u32 *sw_if_index = 0;
831 vnet_interface_counter_lock (im);
/* Snapshot all registrations so pool mutation below is safe. */
833 vec_reset_length (sm->regs_tmp);
837 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
838 ({ vec_add1 (sm->regs_tmp, reg); }));
841 for (i = 0; i < vec_len (sm->regs_tmp); i++)
843 reg = sm->regs_tmp[i];
/* ~0 item means "all interfaces" — drop the lock (the bulk routine
 * takes it itself) and delegate. */
846 vnet_interface_counter_unlock (im);
847 do_combined_interface_counters (sm);
848 vnet_interface_counter_lock (im);
/* Snapshot this registration's clients for the same reason. */
851 vec_reset_length (sm->clients_tmp);
854 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
858 for (j = 0; j < vec_len (sm->clients_tmp); j++)
860 client = sm->clients_tmp[j];
862 vl_reg = vl_api_client_index_to_registration (client->client_index);
864 //Client may have disconnected abruptly, clean up so we don't poll nothing.
868 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
869 reg->item, client->client_index);
872 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
873 memset (mp, 0, sizeof (*mp));
876 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
879 * count will eventually be used to optimise the batching
880 * of per client messages for each stat. For now setting this to 1 then
881 * iterate. This will not affect API.
883 * FIXME instead of enqueueing here, this should be sent to a batch
884 * storer for per-client transmission. Each "mp" sent would be a single entry
885 * and if a client is listening to other sw_if_indexes for same, it would be
886 * appended to that *mp
890 * - capturing the timestamp of the counters "when VPP knew them" is important.
891 * Less so is that the timing of the delivery to the control plane be in the same
894 * i.e. As long as the control plane can delta messages from VPP and work out
895 * velocity etc based on the timestamp, it can do so in a more "batch mode".
897 * It would be beneficial to keep a "per-client" message queue, and then
898 * batch all the stat messages for a client into one message, with
899 * discrete timestamps.
901 * Given this particular API is for "per interface" one assumes that the scale
902 * is less than the ~0 case, which the prior API is suited for.
906 * 1 message per api call for now
908 mp->count = htonl (1);
909 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
911 vp = (vl_api_vnet_combined_counter_t *) mp->data;
912 vp->sw_if_index = htonl (reg->item);
914 im = &vnet_get_main ()->interface_main;
/* X-macro: fetch the combined counter of type X for this interface
 * and store packets/bytes into the matching message fields. */
917 cm = im->combined_sw_if_counters + X; \
918 vlib_get_combined_counter (cm, reg->item, &v); \
919 clib_mem_unaligned (&vp->x##_packets, u64) = \
920 clib_host_to_net_u64 (v.packets); \
921 clib_mem_unaligned (&vp->x##_bytes, u64) = \
922 clib_host_to_net_u64 (v.bytes);
925 _(VNET_INTERFACE_COUNTER_RX, rx);
926 _(VNET_INTERFACE_COUNTER_TX, tx);
927 _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
928 _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
929 _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
930 _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
931 _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
932 _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
936 vl_api_send_msg (vl_reg, (u8 *) mp);
940 vnet_interface_counter_unlock (im);
943 /**********************************
944 * Per Interface simple stats
945 **********************************/
947 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_SIMPLE_STATS: same shape as the
 * combined variant — validate every sw_if_index first, then apply the
 * enable/disable registration per index, roll back on client
 * disconnect, and reply with retval. */
949 vl_api_want_per_interface_simple_stats_t_handler
950 (vl_api_want_per_interface_simple_stats_t * mp)
952 stats_main_t *sm = &stats_main;
953 vpe_client_registration_t rp;
954 vl_api_want_per_interface_simple_stats_reply_t *rmp;
955 vlib_simple_counter_main_t *cm;
958 vl_api_registration_t *reg;
959 u32 i, swif, num = 0;
961 num = ntohl (mp->num);
963 for (i = 0; i < num; i++)
965 swif = ntohl (mp->sw_ifs[i]);
967 /* Check its a real sw_if_index that the client is allowed to see */
970 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
972 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indexes valid: apply the enable/disable for each one. */
978 for (i = 0; i < num; i++)
980 swif = ntohl (mp->sw_ifs[i]);
982 rp.client_index = mp->client_index;
983 rp.client_pid = mp->pid;
984 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
985 swif, ntohl (mp->enable_disable));
989 reg = vl_api_client_index_to_registration (mp->client_index);
991 /* Client may have disconnected abruptly, clean up */
994 for (i = 0; i < num; i++)
996 swif = ntohl (mp->sw_ifs[i]);
998 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
1006 rmp = vl_msg_api_alloc (sizeof (*rmp));
1007 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
1008 rmp->context = mp->context;
1009 rmp->retval = retval;
1011 vl_api_send_msg (reg, (u8 *) rmp);
1014 /* Per Interface Simple distribution to client */
/* Poller-side distribution of simple counters for per-interface
 * registrations. Mirrors do_combined_per_interface_counters: snapshot
 * registrations and clients into temp vectors, delegate ~0 items to
 * the all-interfaces path, and send each live client one message per
 * registered interface containing every simple counter type. */
1016 do_simple_per_interface_counters (stats_main_t * sm)
1018 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
1019 vnet_interface_main_t *im = sm->interface_main;
1020 api_main_t *am = sm->api_main;
1021 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1022 vl_api_registration_t *vl_reg;
1023 vlib_simple_counter_main_t *cm;
1025 vpe_client_stats_registration_t *reg;
1026 vpe_client_registration_t *client;
1027 u32 timestamp, count;
1028 vl_api_vnet_simple_counter_t *vp = 0;
1031 vnet_interface_counter_lock (im);
/* Snapshot all registrations so pool mutation below is safe. */
1033 vec_reset_length (sm->regs_tmp);
1037 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
1038 ({ vec_add1 (sm->regs_tmp, reg); }));
1041 for (i = 0; i < vec_len (sm->regs_tmp); i++)
1043 reg = sm->regs_tmp[i];
/* ~0 item means "all interfaces": release the lock and delegate. */
1044 if (reg->item == ~0)
1046 vnet_interface_counter_unlock (im);
1047 do_simple_interface_counters (sm);
1048 vnet_interface_counter_lock (im);
1051 vec_reset_length (sm->clients_tmp);
1054 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
1058 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1060 client = sm->clients_tmp[j];
1061 vl_reg = vl_api_client_index_to_registration (client->client_index);
1063 /* Client may have disconnected abruptly, clean up */
1067 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1068 reg->item, client->client_index);
1072 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1073 memset (mp, 0, sizeof (*mp));
1074 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1077 * count will eventually be used to optimise the batching
1078 * of per client messages for each stat. For now setting this to 1 then
1079 * iterate. This will not affect API.
1081 * FIXME instead of enqueueing here, this should be sent to a batch
1082 * storer for per-client transmission. Each "mp" sent would be a single entry
1083 * and if a client is listening to other sw_if_indexes for same, it would be
1084 * appended to that *mp
1088 * - capturing the timestamp of the counters "when VPP knew them" is important.
1089 * Less so is that the timing of the delivery to the control plane be in the same
1092 * i.e. As long as the control plane can delta messages from VPP and work out
1093 * velocity etc based on the timestamp, it can do so in a more "batch mode".
1095 * It would be beneficial to keep a "per-client" message queue, and then
1096 * batch all the stat messages for a client into one message, with
1097 * discrete timestamps.
1099 * Given this particular API is for "per interface" one assumes that the scale
1100 * is less than the ~0 case, which the prior API is suited for.
1104 * 1 message per api call for now
1106 mp->count = htonl (1);
1107 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1108 vp = (vl_api_vnet_simple_counter_t *) mp->data;
1110 vp->sw_if_index = htonl (reg->item);
/* Fetch each simple counter type for this interface and store it
 * in network byte order into the matching message field. */
1112 // VNET_INTERFACE_COUNTER_DROP
1113 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1114 v = vlib_get_simple_counter (cm, reg->item);
1115 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1117 // VNET_INTERFACE_COUNTER_PUNT
1118 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1119 v = vlib_get_simple_counter (cm, reg->item);
1120 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1122 // VNET_INTERFACE_COUNTER_IP4
1123 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1124 v = vlib_get_simple_counter (cm, reg->item);
1125 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1127 //VNET_INTERFACE_COUNTER_IP6
1128 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1129 v = vlib_get_simple_counter (cm, reg->item);
1130 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1132 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1133 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1134 v = vlib_get_simple_counter (cm, reg->item);
1135 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1136 clib_host_to_net_u64 (v);
1138 //VNET_INTERFACE_COUNTER_RX_MISS
1139 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1140 v = vlib_get_simple_counter (cm, reg->item);
1141 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1143 //VNET_INTERFACE_COUNTER_RX_ERROR
1144 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1145 v = vlib_get_simple_counter (cm, reg->item);
1146 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1148 //VNET_INTERFACE_COUNTER_TX_ERROR
1149 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1150 v = vlib_get_simple_counter (cm, reg->item);
1151 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1153 //VNET_INTERFACE_COUNTER_MPLS
1154 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1155 v = vlib_get_simple_counter (cm, reg->item);
1156 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1158 vl_api_send_msg (vl_reg, (u8 *) mp);
1162 vnet_interface_counter_unlock (im);
1165 /**********************************
1167 **********************************/
/* Sleep for sec seconds + nsec nanoseconds via nanosleep(), used to
 * back off when the shared-memory ring is full. On EINTR the remaining
 * time is presumably retried (retry lines elided from this extract);
 * other failures log a warning. */
1170 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1172 struct timespec _req, *req = &_req;
1173 struct timespec _rem, *rem = &_rem;
1176 req->tv_nsec = nsec;
1179 if (nanosleep (req, rem) == 0)
1184 clib_unix_warning ("nanosleep");
1190 * @brief The context passed when collecting adjacency counters
1192 typedef struct ip4_nbr_stats_ctx_t_
1195 * The SW IF index all these adjs belong to
1200 * A vector of ip4 nbr counters
/* Accumulated per-adjacency counters for the current interface; grown
 * by ip4_nbr_stats_cb and drained in batches by ip4_nbr_ship. */
1202 vl_api_ip4_nbr_counter_t *counters;
1203 } ip4_nbr_stats_ctx_t;
/* Adjacency-walk callback: if the adjacency has a non-zero packet
 * count, append a wire-format counter record (values in network byte
 * order, next-hop address, link type) to ctx->counters. Always
 * continues the walk. */
1205 static adj_walk_rc_t
1206 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1208 vl_api_ip4_nbr_counter_t *vl_counter;
1209 vlib_counter_t adj_counter;
1210 ip4_nbr_stats_ctx_t *ctx;
1211 ip_adjacency_t *adj;
1214 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies to keep messages small. */
1216 if (0 != adj_counter.packets)
1218 vec_add2 (ctx->counters, vl_counter, 1);
1221 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1222 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
/* as_u32 is already in network byte order — no swap needed. */
1223 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1224 vl_counter->link_type = adj->ia_link;
1226 return (ADJ_WALK_RC_CONTINUE);
1229 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Enqueue a message to the shared-memory ring and, if the ring was
 * full when we checked, pause (STATS_RELEASE_DELAY_NS) to let the
 * consumer drain it before the caller sends more. */
1232 send_and_pause (stats_main_t * sm, svm_queue_t * q, u8 * mp)
1237 pause = svm_queue_is_full (q);
1239 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1240 svm_queue_unlock (q);
1244 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1245 STATS_RELEASE_DELAY_NS);
/* Drain the collected ip4 neighbour counters for one interface,
 * shipping them to the main thread in IP4_FIB_COUNTER_BATCH_SIZE
 * chunks taken from the tail of ctx->counters (order is unimportant to
 * the consumer). send_and_pause may sleep if the ring is full, in
 * which case the loop resumes with whatever is still in the vector. */
1249 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1251 api_main_t *am = sm->api_main;
1252 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1253 svm_queue_t *q = shmem_hdr->vl_input_queue;
1254 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1258 * If the walk context has counters, which may be left over from the last
1259 * suspend, then we continue from there.
1261 while (0 != vec_len (ctx->counters))
1263 u32 n_items = MIN (vec_len (ctx->counters),
1264 IP4_FIB_COUNTER_BATCH_SIZE);
/* Hold the data-structure lock while building the message. */
1267 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1269 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1272 (vl_api_ip4_nbr_counter_t)));
1273 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1274 mp->count = ntohl (n_items);
1275 mp->sw_if_index = ntohl (ctx->sw_if_index);
1280 * copy the counters from the back of the context, then we can easily
1281 * 'erase' them by resetting the vector length.
1282 * The order we push the stats to the caller is not important.
1285 &ctx->counters[vec_len (ctx->counters) - n_items],
1286 n_items * sizeof (*ctx->counters));
1288 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1293 send_and_pause (sm, q, (u8 *) & mp);
/* Top-level ip4 neighbour counter collection: for every software
 * interface, walk its neighbour adjacencies under the data-structure
 * lock (so the walk is safe against routing updates), collect non-zero
 * counters into the context, then ship them if any were found. */
1298 do_ip4_nbr_counters (stats_main_t * sm)
1300 vnet_main_t *vnm = vnet_get_main ();
1301 vnet_interface_main_t *im = &vnm->interface_main;
1302 vnet_sw_interface_t *si;
1304 ip4_nbr_stats_ctx_t ctx = {
1310 pool_foreach (si, im->sw_interfaces,
1313 * update the interface we are now concerned with
1315 ctx.sw_if_index = si->sw_if_index;
1318 * we are about to walk another interface, so we shouldn't have any pending
1321 ASSERT(ctx.counters == NULL);
1324 * visit each neighbour adjacency on the interface and collect
1325 * its current stats.
1326 * Because we hold the lock the walk is synchronous, so safe to routing
1327 * updates. It's limited in work by the number of adjacenies on an
1328 * interface, which is typically not huge.
1330 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1331 adj_nbr_walk (si->sw_if_index,
1338 * if this interface has some adjacencies with counters then ship them,
1339 * else continue to the next interface.
1341 if (NULL != ctx.counters)
1343 ip4_nbr_ship(sm, &ctx);
/*
 * Walk context for collecting IP6 neighbour adjacency counters:
 * one interface at a time, accumulating into a vector that the
 * ship function drains in batches.
 * NOTE(review): the sw_if_index member declaration itself falls in a
 * gap of this listing (only its doc comment is visible).
 */
1350 * @brief The context passed when collecting adjacency counters
1352 typedef struct ip6_nbr_stats_ctx_t_
1355 * The SW IF index all these adjs belong to
1360 * A vector of ip6 nbr counters
1362 vl_api_ip6_nbr_counter_t *counters;
1363 } ip6_nbr_stats_ctx_t;
/*
 * adj_nbr_walk() callback: record the combined (packets/bytes) counter
 * for one IP6 neighbour adjacency into the walk context, skipping
 * adjacencies that have seen no packets.
 * NOTE(review): fragmentary listing -- the lines that assign
 * 'ctx' (from the walk argument) and 'adj' (presumably adj_get (ai))
 * fall in the numbering gap between 1372 and 1375; as shown, both
 * would be used uninitialized.  Confirm against the full source.
 */
1365 static adj_walk_rc_t
1366 ip6_nbr_stats_cb (adj_index_t ai,
1369 vl_api_ip6_nbr_counter_t *vl_counter;
1370 vlib_counter_t adj_counter;
1371 ip6_nbr_stats_ctx_t *ctx;
1372 ip_adjacency_t *adj;
1375 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
1377 if (0 != adj_counter.packets)
1379 vec_add2(ctx->counters, vl_counter, 1);
1382 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1383 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1384 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1385 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1386 vl_counter->link_type = adj->ia_link;
1388 return (ADJ_WALK_RC_CONTINUE);
/*
 * Return the smaller of two values.
 *
 * Guarded with #ifndef: MIN is already in scope earlier in this file
 * (ip4_nbr_ship() above this point uses it), so an unconditional
 * second definition would trigger macro-redefinition warnings.
 * NOTE: classic double-evaluation macro -- do not pass arguments with
 * side effects.  Current call sites only pass pure expressions
 * (vec_len() results and batch-size constants).
 */
#ifndef MIN
#define MIN(x,y) (((x)<(y))?(x):(y))
#endif
/*
 * IP6 twin of ip4_nbr_ship(): drain the accumulated IP6 neighbour
 * counters in 'ctx' to the API client, IP6_FIB_COUNTER_BATCH_SIZE
 * entries per message, copying from the back of the vector and
 * shrinking it so a suspend/resume picks up where we left off.
 * NOTE(review): fragmentary listing -- the clib_memcpy of the batch
 * and loop braces fall in the numbering gaps.
 */
1394 ip6_nbr_ship (stats_main_t * sm,
1395 ip6_nbr_stats_ctx_t *ctx)
1397 api_main_t *am = sm->api_main;
1398 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1399 svm_queue_t *q = shmem_hdr->vl_input_queue;
1400 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1404 * If the walk context has counters, which may be left over from the last
1405 * suspend, then we continue from there.
1407 while (0 != vec_len(ctx->counters))
1409 u32 n_items = MIN (vec_len (ctx->counters),
1410 IP6_FIB_COUNTER_BATCH_SIZE);
1413 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1415 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1418 (vl_api_ip6_nbr_counter_t)));
1419 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1420 mp->count = ntohl (n_items);
1421 mp->sw_if_index = ntohl (ctx->sw_if_index);
1426 * copy the counters from the back of the context, then we can easily
1427 * 'erase' them by resetting the vector length.
1428 * The order we push the stats to the caller is not important.
1431 &ctx->counters[vec_len (ctx->counters) - n_items],
1432 n_items * sizeof (*ctx->counters));
1434 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1439 send_and_pause(sm, q, (u8 *) & mp);
/*
 * IP6 twin of do_ip4_nbr_counters(): per interface, walk the neighbour
 * adjacencies under the data-structure lock into ctx.counters, then
 * ship any collected counters via ip6_nbr_ship().
 * NOTE(review): fragmentary listing -- the walk callback argument and
 * dsunlock() call are in the numbering gaps.
 */
1444 do_ip6_nbr_counters (stats_main_t * sm)
1446 vnet_main_t *vnm = vnet_get_main ();
1447 vnet_interface_main_t *im = &vnm->interface_main;
1448 vnet_sw_interface_t *si;
1450 ip6_nbr_stats_ctx_t ctx = {
1456 pool_foreach (si, im->sw_interfaces,
1459 * update the interface we are now concerned with
1461 ctx.sw_if_index = si->sw_if_index;
1464 * we are about to walk another interface, so we shouldn't have any pending
1467 ASSERT(ctx.counters == NULL);
1470 * visit each neighbour adjacency on the interface and collect
1471 * its current stats.
1472 * Because we hold the lock the walk is synchronous, so safe to routing
1473 * updates. It's limited in work by the number of adjacenies on an
1474 * interface, which is typically not huge.
1476 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1477 adj_nbr_walk (si->sw_if_index,
1484 * if this interface has some adjacencies with counters then ship them,
1485 * else continue to the next interface.
1487 if (NULL != ctx.counters)
1489 ip6_nbr_ship(sm, &ctx);
/*
 * Collect per-route combined counters for every IP4 FIB and stream
 * them to the client in IP4_FIB_COUNTER_BATCH_SIZE messages.
 * Structure: snapshot the fib pool into a vector; for each fib,
 * (1) under dslock, harvest all prefixes from the per-length hash
 *     tables into do_fibs->ip4routes,
 * (2) for each harvested route, read the load-balance counter and
 *     append non-zero entries to the current message, flushing full
 *     messages to the shm queue (pausing when the queue is stuffed).
 * If the main thread signals via data_structure_lock->release_hint,
 * we drop the lock, sleep, and restart from start_at_fib_index --
 * hence the "We may have bailed out" re-entry check.
 * NOTE(review): fragmentary listing -- braces, 'continue' statements,
 * mp->count increments and dsunlock() calls fall in the numbering
 * gaps; read flow-control claims above against the full source.
 */
1496 do_ip4_fib_counters (stats_main_t * sm)
1498 ip4_main_t *im4 = &ip4_main;
1499 api_main_t *am = sm->api_main;
1500 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1501 svm_queue_t *q = shmem_hdr->vl_input_queue;
1505 do_ip46_fibs_t *do_fibs;
1506 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1507 u32 items_this_message;
1508 vl_api_ip4_fib_counter_t *ctrp = 0;
1509 u32 start_at_fib_index = 0;
1512 do_fibs = &sm->do_ip46_fibs;
1515 vec_reset_length (do_fibs->fibs);
1517 pool_foreach (fib, im4->fibs,
1518 ({vec_add1(do_fibs->fibs,fib);}));
1522 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1524 fib = do_fibs->fibs[j];
1525 /* We may have bailed out due to control-plane activity */
1526 while ((fib - im4->fibs) < start_at_fib_index)
1529 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1533 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1534 mp = vl_msg_api_alloc_as_if_client
1536 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1537 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1539 mp->vrf_id = ntohl (fib->ft_table_id);
1540 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1544 /* happens if the last FIB was empty... */
1545 ASSERT (mp->count == 0);
1546 mp->vrf_id = ntohl (fib->ft_table_id);
1549 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1551 vec_reset_length (do_fibs->ip4routes);
1552 vec_reset_length (do_fibs->results);
1554 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1556 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1560 vec_reset_length (do_fibs->pvec);
1562 x.address_length = i;
1564 hash_foreach_pair (p, hash, (
1566 vec_add1 (do_fibs->pvec, p);}
1568 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1570 p = do_fibs->pvec[k];
1571 x.address.data_u32 = p->key;
1572 x.index = p->value[0];
1574 vec_add1 (do_fibs->ip4routes, x);
1575 if (sm->data_structure_lock->release_hint)
1577 start_at_fib_index = fib - im4->fibs;
1579 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1580 STATS_RELEASE_DELAY_NS);
1582 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1588 vec_foreach (r, do_fibs->ip4routes)
1591 const dpo_id_t *dpo_id;
1594 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1595 index = (u32) dpo_id->dpoi_index;
1597 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1600 * If it has actually
1601 * seen at least one packet, send it.
1606 /* already in net byte order */
1607 ctrp->address = r->address.as_u32;
1608 ctrp->address_length = r->address_length;
1609 ctrp->packets = clib_host_to_net_u64 (c.packets);
1610 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1614 if (mp->count == items_this_message)
1616 mp->count = htonl (items_this_message);
1618 * If the main thread's input queue is stuffed,
1619 * drop the data structure lock (which the main thread
1620 * may want), and take a pause.
1623 if (svm_queue_is_full (q))
1626 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1627 svm_queue_unlock (q);
1629 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1630 STATS_RELEASE_DELAY_NS);
1633 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1634 svm_queue_unlock (q);
1636 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1637 mp = vl_msg_api_alloc_as_if_client
1639 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1640 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1642 mp->vrf_id = ntohl (fib->ft_table_id);
1643 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1645 } /* for each (mp or single) adj */
1646 if (sm->data_structure_lock->release_hint)
1648 start_at_fib_index = fib - im4->fibs;
1650 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1652 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1655 } /* vec_foreach (routes) */
1659 /* Flush any data from this fib */
1662 mp->count = htonl (mp->count);
1663 vl_msg_api_send_shmem (q, (u8 *) & mp);
1668 /* If e.g. the last FIB had no reportable routes, free the buffer */
1670 vl_msg_api_free (mp);
/*
 * mfib_table_walk() callback: record the prefix of each multicast FIB
 * entry into sm->do_ip46_fibs.mroutes, so the caller can re-look each
 * one up after any suspend (entries may move while we sleep).
 * NOTE(review): fragmentary listing -- return statement/value not
 * visible here.
 */
1674 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1676 stats_main_t *sm = ctx;
1677 do_ip46_fibs_t *do_fibs;
1678 mfib_entry_t *entry;
1680 do_fibs = &sm->do_ip46_fibs;
1681 entry = mfib_entry_get (fei);
1683 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/*
 * Collect per-(S,G) replicate counters for every IP4 multicast FIB and
 * stream them to the client in IP4_MFIB_COUNTER_BATCH_SIZE messages.
 * Per mfib: walk the table under dslock to snapshot prefixes into
 * do_fibs->mroutes, then (after suspends are possible) RE-look-up each
 * prefix before reading its counter, skipping entries that vanished.
 * Full messages are flushed to the shm queue, spinning/sleeping while
 * the queue is full.
 * NOTE(review): fragmentary listing -- braces, mp->count increments,
 * dsunlock() and svm_queue_lock() calls fall in the numbering gaps.
 */
1689 do_ip4_mfib_counters (stats_main_t * sm)
1691 ip4_main_t *im4 = &ip4_main;
1692 api_main_t *am = sm->api_main;
1693 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1694 svm_queue_t *q = shmem_hdr->vl_input_queue;
1697 do_ip46_fibs_t *do_fibs;
1698 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1699 u32 items_this_message;
1700 vl_api_ip4_mfib_counter_t *ctrp = 0;
1701 u32 start_at_mfib_index = 0;
1704 do_fibs = &sm->do_ip46_fibs;
1706 vec_reset_length (do_fibs->mfibs);
1708 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1711 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1713 mfib = do_fibs->mfibs[j];
1714 /* We may have bailed out due to control-plane activity */
1715 while ((mfib - im4->mfibs) < start_at_mfib_index)
1720 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1721 mp = vl_msg_api_alloc_as_if_client
1723 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1724 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1726 mp->vrf_id = ntohl (mfib->mft_table_id);
1727 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1731 /* happens if the last MFIB was empty... */
1732 ASSERT (mp->count == 0);
1733 mp->vrf_id = ntohl (mfib->mft_table_id);
1736 vec_reset_length (do_fibs->mroutes);
1739 * walk the table with table updates blocked
1741 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1743 mfib_table_walk (mfib->mft_index,
1744 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1747 vec_foreach (pfx, do_fibs->mroutes)
1749 const dpo_id_t *dpo_id;
1750 fib_node_index_t mfei;
1755 * re-lookup the entry, since we suspend during the collection
1757 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1759 if (FIB_NODE_INDEX_INVALID == mfei)
1762 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1763 index = (u32) dpo_id->dpoi_index;
1765 vlib_get_combined_counter (&replicate_main.repm_counters,
1766 dpo_id->dpoi_index, &c);
1768 * If it has seen at least one packet, send it.
1772 /* already in net byte order */
1773 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1774 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1775 ctrp->group_length = pfx->fp_len;
1776 ctrp->packets = clib_host_to_net_u64 (c.packets);
1777 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1781 if (mp->count == items_this_message)
1783 mp->count = htonl (items_this_message);
1785 * If the main thread's input queue is stuffed,
1786 * drop the data structure lock (which the main thread
1787 * may want), and take a pause.
1791 while (svm_queue_is_full (q))
1793 svm_queue_unlock (q);
1794 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1795 STATS_RELEASE_DELAY_NS);
1798 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1799 svm_queue_unlock (q);
1801 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1802 mp = vl_msg_api_alloc_as_if_client
1804 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1805 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1807 mp->vrf_id = ntohl (mfib->mft_table_id);
1808 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1813 /* Flush any data from this mfib */
1816 mp->count = htonl (mp->count);
1817 vl_msg_api_send_shmem (q, (u8 *) & mp);
1822 /* If e.g. the last FIB had no reportable routes, free the buffer */
1824 vl_msg_api_free (mp);
/*
 * IP6 twin of do_ip4_mfib_counters(): snapshot each IP6 multicast
 * FIB's prefixes under dslock, re-look-up each (S,G) after possible
 * suspends, and stream non-zero replicate counters to the client in
 * IP6_MFIB_COUNTER_BATCH_SIZE messages (16-byte group/source copies
 * instead of 4-byte).
 * NOTE(review): fragmentary listing -- braces, mp->count increments,
 * dsunlock() and svm_queue_lock() calls fall in the numbering gaps.
 */
1828 do_ip6_mfib_counters (stats_main_t * sm)
1830 ip6_main_t *im6 = &ip6_main;
1831 api_main_t *am = sm->api_main;
1832 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1833 svm_queue_t *q = shmem_hdr->vl_input_queue;
1836 do_ip46_fibs_t *do_fibs;
1837 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1838 u32 items_this_message;
1839 vl_api_ip6_mfib_counter_t *ctrp = 0;
1840 u32 start_at_mfib_index = 0;
1843 do_fibs = &sm->do_ip46_fibs;
1845 vec_reset_length (do_fibs->mfibs);
1847 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1850 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1852 mfib = do_fibs->mfibs[j];
1853 /* We may have bailed out due to control-plane activity */
1854 while ((mfib - im6->mfibs) < start_at_mfib_index)
1859 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1860 mp = vl_msg_api_alloc_as_if_client
1862 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1863 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1865 mp->vrf_id = ntohl (mfib->mft_table_id);
1866 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1870 /* happens if the last MFIB was empty... */
1871 ASSERT (mp->count == 0);
1872 mp->vrf_id = ntohl (mfib->mft_table_id);
1875 vec_reset_length (do_fibs->mroutes);
1878 * walk the table with table updates blocked
1880 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1882 mfib_table_walk (mfib->mft_index,
1883 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1886 vec_foreach (pfx, do_fibs->mroutes)
1888 const dpo_id_t *dpo_id;
1889 fib_node_index_t mfei;
1894 * re-lookup the entry, since we suspend during the collection
1896 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1898 if (FIB_NODE_INDEX_INVALID == mfei)
1901 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1902 index = (u32) dpo_id->dpoi_index;
1904 vlib_get_combined_counter (&replicate_main.repm_counters,
1905 dpo_id->dpoi_index, &c);
1907 * If it has seen at least one packet, send it.
1911 /* already in net byte order */
1912 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1913 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1914 ctrp->group_length = pfx->fp_len;
1915 ctrp->packets = clib_host_to_net_u64 (c.packets);
1916 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1920 if (mp->count == items_this_message)
1922 mp->count = htonl (items_this_message);
1924 * If the main thread's input queue is stuffed,
1925 * drop the data structure lock (which the main thread
1926 * may want), and take a pause.
1930 while (svm_queue_is_full (q))
1932 svm_queue_unlock (q);
1933 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1934 STATS_RELEASE_DELAY_NS);
1937 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1938 svm_queue_unlock (q);
1940 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1941 mp = vl_msg_api_alloc_as_if_client
1943 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1944 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1946 mp->vrf_id = ntohl (mfib->mft_table_id);
1947 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1952 /* Flush any data from this mfib */
1955 mp->count = htonl (mp->count);
1956 vl_msg_api_send_shmem (q, (u8 *) & mp);
1961 /* If e.g. the last FIB had no reportable routes, free the buffer */
1963 vl_msg_api_free (mp);
/* Argument block for the bihash walk below (struct opening line and
 * remaining members fall in a gap of this listing). */
1969 ip6_route_t **routep;
1971 } add_routes_in_fib_arg_t;
/*
 * clib_bihash foreach callback: harvest every IP6 forwarding-table
 * key/value pair belonging to ap->fib_index into *ap->routep.
 * If the main thread has signalled for the data-structure lock
 * (release_hint), longjmp back out of the whole walk so the caller
 * can release it and retry.
 * Key layout visible here: key[2] high 32 bits = fib index,
 * key[2] low byte = prefix length, kvp start = ip6 address.
 */
1974 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1976 add_routes_in_fib_arg_t *ap = arg;
1977 stats_main_t *sm = ap->sm;
1979 if (sm->data_structure_lock->release_hint)
1980 clib_longjmp (&sm->jmp_buf, 1);
1982 if (kvp->key[2] >> 32 == ap->fib_index)
1984 ip6_address_t *addr;
1986 addr = (ip6_address_t *) kvp;
1987 vec_add2 (*ap->routep, r, 1);
1988 r->address = addr[0];
1989 r->address_length = kvp->key[2] & 0xFF;
1990 r->index = kvp->value;
/*
 * Collect per-route combined counters for every IP6 FIB and stream
 * them in IP6_FIB_COUNTER_BATCH_SIZE messages.
 * Unlike the IP4 path, routes are harvested by walking the shared
 * IP6 forwarding bihash; setjmp/longjmp is used so the bihash walk
 * (add_routes_in_fib) can abort mid-flight when the main thread wants
 * the data-structure lock, after which we sleep and restart this fib.
 * NOTE(review): fragmentary listing -- braces, mp->count updates,
 * dsunlock() and the setjmp else-branch are in the numbering gaps.
 */
1995 do_ip6_fib_counters (stats_main_t * sm)
1997 ip6_main_t *im6 = &ip6_main;
1998 api_main_t *am = sm->api_main;
1999 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
2000 svm_queue_t *q = shmem_hdr->vl_input_queue;
2003 do_ip46_fibs_t *do_fibs;
2004 vl_api_vnet_ip6_fib_counters_t *mp = 0;
2005 u32 items_this_message;
2006 vl_api_ip6_fib_counter_t *ctrp = 0;
2007 u32 start_at_fib_index = 0;
2008 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
2009 add_routes_in_fib_arg_t _a, *a = &_a;
2012 do_fibs = &sm->do_ip46_fibs;
2014 vec_reset_length (do_fibs->fibs);
2016 pool_foreach (fib, im6->fibs,
2017 ({vec_add1(do_fibs->fibs,fib);}));
2021 for (i = 0; i < vec_len (do_fibs->fibs); i++)
2023 fib = do_fibs->fibs[i];
2024 /* We may have bailed out due to control-plane activity */
2025 while ((fib - im6->fibs) < start_at_fib_index)
2030 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2031 mp = vl_msg_api_alloc_as_if_client
2033 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2034 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2036 mp->vrf_id = ntohl (fib->ft_table_id);
2037 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2040 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2042 vec_reset_length (do_fibs->ip6routes);
2043 vec_reset_length (do_fibs->results);
2045 a->fib_index = fib - im6->fibs;
2046 a->routep = &do_fibs->ip6routes;
2049 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
2051 start_at_fib_index = fib - im6->fibs;
2052 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
2057 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2058 STATS_RELEASE_DELAY_NS);
2060 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2064 vec_foreach (r, do_fibs->ip6routes)
2068 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2071 * If it has actually
2072 * seen at least one packet, send it.
2076 /* already in net byte order */
2077 ctrp->address[0] = r->address.as_u64[0];
2078 ctrp->address[1] = r->address.as_u64[1];
2079 ctrp->address_length = (u8) r->address_length;
2080 ctrp->packets = clib_host_to_net_u64 (c.packets);
2081 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2085 if (mp->count == items_this_message)
2087 mp->count = htonl (items_this_message);
2089 * If the main thread's input queue is stuffed,
2090 * drop the data structure lock (which the main thread
2091 * may want), and take a pause.
2094 if (svm_queue_is_full (q))
2097 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2098 svm_queue_unlock (q);
2100 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2101 STATS_RELEASE_DELAY_NS);
2104 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2105 svm_queue_unlock (q);
2107 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2108 mp = vl_msg_api_alloc_as_if_client
2110 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2111 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2113 mp->vrf_id = ntohl (fib->ft_table_id);
2114 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2118 if (sm->data_structure_lock->release_hint)
2120 start_at_fib_index = fib - im6->fibs;
2122 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2124 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2127 } /* vec_foreach (routes) */
2131 /* Flush any data from this fib */
2134 mp->count = htonl (mp->count);
2135 vl_msg_api_send_shmem (q, (u8 *) & mp);
2140 /* If e.g. the last FIB had no reportable routes, free the buffer */
2142 vl_msg_api_free (mp);
/* Walk context accumulating one vl_api_udp_encap_counter_t per
 * UDP-encap object; drained in batches by udp_encap_ship(). */
2145 typedef struct udp_encap_stats_walk_t_
2147 vl_api_udp_encap_counter_t *stats;
2148 } udp_encap_stats_walk_t;
/*
 * udp_encap_walk() callback: append one stat record (id + packet/byte
 * counts from udp_encap_get_stats) per UDP-encap object to ctx->stats.
 * Always continues the walk.
 */
2151 udp_encap_stats_walk_cb (index_t uei, void *arg)
2153 udp_encap_stats_walk_t *ctx = arg;
2154 vl_api_udp_encap_counter_t *stat;
2157 ue = udp_encap_get (uei);
2158 vec_add2 (ctx->stats, stat, 1);
2160 stat->id = ue->ue_id;
2161 udp_encap_get_stats (ue->ue_id, &stat->packets, &stat->bytes);
2163 return (WALK_CONTINUE);
/*
 * Drain the accumulated UDP-encap counters to the API client in
 * UDP_ENCAP_COUNTER_BATCH_SIZE chunks, using the same copy-from-the-
 * back / shrink-vector pattern as ip4_nbr_ship() so a suspend in
 * send_and_pause() resumes cleanly.
 * NOTE(review): fragmentary listing -- the stats_main/api_main local
 * declarations and the batch clib_memcpy are in the numbering gaps.
 */
2167 udp_encap_ship (udp_encap_stats_walk_t * ctx)
2169 vl_api_vnet_udp_encap_counters_t *mp;
2170 vl_shmem_hdr_t *shmem_hdr;
2178 shmem_hdr = am->shmem_hdr;
2179 q = shmem_hdr->vl_input_queue;
2182 * If the walk context has counters, which may be left over from the last
2183 * suspend, then we continue from there.
2185 while (0 != vec_len (ctx->stats))
2187 u32 n_items = MIN (vec_len (ctx->stats),
2188 UDP_ENCAP_COUNTER_BATCH_SIZE);
2191 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2193 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2196 (vl_api_udp_encap_counter_t)));
2197 mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2198 mp->count = ntohl (n_items);
2201 * copy the counters from the back of the context, then we can easily
2202 * 'erase' them by resetting the vector length.
2203 * The order we push the stats to the caller is not important.
2206 &ctx->stats[vec_len (ctx->stats) - n_items],
2207 n_items * sizeof (*ctx->stats));
2209 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
2214 send_and_pause (sm, q, (u8 *) & mp);
/*
 * Top-level UDP-encap collection: walk all encap objects into the
 * context under the data-structure lock, then ship the batches.
 * NOTE(review): fragmentary listing -- dsunlock() and the ctx
 * initializer body are in the numbering gaps.
 */
2219 do_udp_encap_counters (stats_main_t * sm)
2221 vl_api_udp_encap_counter_t *stat;
2223 udp_encap_stats_walk_t ctx = {
2227 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2228 udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
2231 udp_encap_ship (&ctx);
/* Walk context accumulating one vl_api_bier_neighbor_counter_t per
 * BIER fmask; drained in batches by bier_neighbor_ship(). */
2234 typedef struct bier_neighbor_stats_walk_t_
2236 vl_api_bier_neighbor_counter_t *stats;
2237 } bier_neighbor_stats_walk_t;
/*
 * bier_fmask_db_walk() callback: append one record per BIER neighbour
 * (fmask) -- its table id, encoded forwarding path and packet/byte
 * counts -- to ctx->stats.  Always continues the walk.
 */
2240 bier_neighbor_stats_walk_cb (index_t bfmi, void *arg)
2242 bier_neighbor_stats_walk_t *ctx = arg;
2243 vl_api_bier_neighbor_counter_t *stat;
2244 fib_route_path_encode_t rpath;
2245 bier_table_id_t btid;
2247 vec_add2 (ctx->stats, stat, 1);
2249 bier_fmask_encode (bfmi, &btid, &rpath);
2251 stat->tbl_id.bt_set = btid.bti_set;
2252 stat->tbl_id.bt_sub_domain = btid.bti_sub_domain;
2253 stat->tbl_id.bt_hdr_len_id = btid.bti_hdr_len;
2254 fib_api_path_encode (&rpath, &stat->path);
2255 bier_fmask_get_stats (bfmi, &stat->packets, &stat->bytes);
2257 return (WALK_CONTINUE);
/*
 * Drain the accumulated BIER-neighbour counters to the API client in
 * BIER_NEIGHBOR_COUNTER_BATCH_SIZE chunks; same copy-from-the-back /
 * shrink-vector resume pattern as the other *_ship() helpers.
 * NOTE(review): fragmentary listing -- the stats_main/api_main local
 * declarations and the batch clib_memcpy are in the numbering gaps.
 */
2261 bier_neighbor_ship (bier_neighbor_stats_walk_t * ctx)
2263 vl_api_vnet_bier_neighbor_counters_t *mp;
2264 vl_shmem_hdr_t *shmem_hdr;
2272 shmem_hdr = am->shmem_hdr;
2273 q = shmem_hdr->vl_input_queue;
2276 * If the walk context has counters, which may be left over from the last
2277 * suspend, then we continue from there.
2279 while (0 != vec_len (ctx->stats))
2281 u32 n_items = MIN (vec_len (ctx->stats),
2282 BIER_NEIGHBOR_COUNTER_BATCH_SIZE);
2285 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2287 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2290 (vl_api_bier_neighbor_counter_t)));
2291 mp->_vl_msg_id = ntohs (VL_API_VNET_BIER_NEIGHBOR_COUNTERS);
2292 mp->count = ntohl (n_items);
2295 * copy the counters from the back of the context, then we can easily
2296 * 'erase' them by resetting the vector length.
2297 * The order we push the stats to the caller is not important.
2300 &ctx->stats[vec_len (ctx->stats) - n_items],
2301 n_items * sizeof (*ctx->stats));
2303 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
2308 send_and_pause (sm, q, (u8 *) & mp);
/*
 * Top-level BIER-neighbour collection: walk the fmask DB into the
 * context under the data-structure lock, then ship the batches.
 * NOTE(review): fragmentary listing -- dsunlock() and the ctx
 * initializer body are in the numbering gaps.
 */
2313 do_bier_neighbor_counters (stats_main_t * sm)
2315 vl_api_bier_neighbor_counter_t *stat;
2317 bier_neighbor_stats_walk_t ctx = {
2321 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2322 bier_fmask_db_walk (bier_neighbor_stats_walk_cb, &ctx);
2325 bier_neighbor_ship (&ctx);
/*
 * Set the stats poller interval (seconds).  Zero is rejected with
 * VNET_API_ERROR_INVALID_ARGUMENT; otherwise the interval is stored
 * (the success return value is in a numbering gap of this listing,
 * presumably 0).
 */
2329 stats_set_poller_delay (u32 poller_delay_sec)
2331 stats_main_t *sm = &stats_main;
2332 if (!poller_delay_sec)
2334 return VNET_API_ERROR_INVALID_ARGUMENT;
2338 sm->stats_poll_interval_in_seconds = poller_delay_sec;
2344 * Accept connection on the socket and exchange the fd for the shared
/*
 * clib_file read callback on the stats listen socket: accept the
 * client, sendmsg() the stat-segment fd across, then close the
 * per-client socket -- the fd exchange is the whole conversation.
 * Errors from accept/sendmsg are reported and (per the visible lines)
 * otherwise tolerated.
 */
2347 static clib_error_t *
2348 stats_socket_accept_ready (clib_file_t * uf)
2350 stats_main_t *sm = &stats_main;
2351 ssvm_private_t *ssvmp = &sm->stat_segment;
2353 clib_socket_t client = { 0 };
2355 err = clib_socket_accept (sm->socket, &client);
2358 clib_error_report (err);
2362 /* Send the fd across and close */
2363 err = clib_socket_sendmsg (&client, 0, 0, &ssvmp->fd, 1);
2365 clib_error_report (err);
2366 clib_socket_close (&client);
/*
 * Create the stat-segment listen socket (server, seqpacket,
 * group-writable, SO_PASSCRED) at sm->socket_name and register it
 * with the file poller so stats_socket_accept_ready() runs on each
 * incoming connection.
 * NOTE(review): fragmentary listing -- error-path return and
 * sm->socket assignment are in the numbering gaps; the clib_socket_t
 * allocation is not visibly freed on clib_socket_init() failure.
 */
2372 stats_segment_socket_init (void)
2374 stats_main_t *sm = &stats_main;
2375 clib_error_t *error;
2376 clib_socket_t *s = clib_mem_alloc (sizeof (clib_socket_t));
2378 s->config = (char *) sm->socket_name;
2379 s->flags = CLIB_SOCKET_F_IS_SERVER | CLIB_SOCKET_F_SEQPACKET |
2380 CLIB_SOCKET_F_ALLOW_GROUP_WRITE | CLIB_SOCKET_F_PASSCRED;
2381 if ((error = clib_socket_init (s)))
2383 clib_error_report (error);
2387 clib_file_t template = { 0 };
2388 clib_file_main_t *fm = &file_main;
2389 template.read_function = stats_socket_accept_ready;
2390 template.file_descriptor = s->fd;
2391 template.description =
2392 format (0, "stats segment listener %s", STAT_SEGMENT_SOCKET_FILE);
2393 clib_file_add (fm, &template);
/*
 * Parse the "stats { ... }" startup-config section:
 *   socket-name <path>  -- explicit stat-segment socket path
 *   default             -- use STAT_SEGMENT_SOCKET_FILE
 *   interval <seconds>  -- poller delay (validated by
 *                          stats_set_poller_delay(); 0 is rejected)
 * If a socket name was configured, bring up the listener socket.
 */
2398 static clib_error_t *
2399 stats_config (vlib_main_t * vm, unformat_input_t * input)
2401 stats_main_t *sm = &stats_main;
2404 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2406 if (unformat (input, "socket-name %s", &sm->socket_name))
2408 else if (unformat (input, "default"))
2409 sm->socket_name = format (0, "%s", STAT_SEGMENT_SOCKET_FILE);
2410 else if (unformat (input, "interval %u", &sec))
2412 int rv = stats_set_poller_delay (sec);
2415 return clib_error_return (0,
2416 "`stats_set_poller_delay' API call failed, rv=%d:%U",
2417 (int) rv, format_vnet_api_errno, rv);
2422 return clib_error_return (0, "unknown input '%U'",
2423 format_unformat_error, input);
2427 if (sm->socket_name)
2428 stats_segment_socket_init ();
2433 /* stats { ... } configuration. */
2436 * @cfgcmd{interval, <seconds>}
2437 * Configure stats poller delay to be @c seconds.
/* Register stats_config() to handle the "stats" startup-config section. */
2440 VLIB_CONFIG_FUNCTION (stats_config, "stats");
2443 vl_api_stats_get_poller_delay_t_handler
2444 (vl_api_stats_get_poller_delay_t * mp)
2446 stats_main_t *sm = &stats_main;
2447 vl_api_registration_t *reg;
2448 reg = vl_api_client_index_to_registration (mp->client_index);
2451 vl_api_stats_get_poller_delay_reply_t *rmp;
2453 rmp = vl_msg_api_alloc (sizeof (*rmp));
2454 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
2455 rmp->context = mp->context;
2457 rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2459 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Main loop of the dedicated stats pthread: block all signals, name
 * the thread "<prefix>_stats", switch to its own heap, then forever:
 * sleep for the configured poll interval, refresh the stat segment,
 * and -- only when a poller is enabled -- run each do_*_counters()
 * collector that has at least one registered client.
 * NOTE(review): fragmentary listing -- sigfillset() and the
 * while(1)/continue scaffolding are in the numbering gaps.
 */
2464 stats_thread_fn (void *arg)
2466 stats_main_t *sm = &stats_main;
2467 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2468 vlib_thread_main_t *tm = vlib_get_thread_main ();
2470 /* stats thread wants no signals. */
2474 pthread_sigmask (SIG_SETMASK, &s, 0);
2477 if (vec_len (tm->thread_prefix))
2478 vlib_set_thread_name ((char *)
2479 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2481 clib_mem_set_heap (w->thread_mheap);
2485 ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
2488 /* Always update stats segment data */
2489 do_stat_segment_updates (sm);
2491 if (!(sm->enable_poller))
2495 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2496 do_combined_per_interface_counters (sm);
2499 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2500 do_simple_per_interface_counters (sm);
2502 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2503 do_ip4_fib_counters (sm);
2505 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2506 do_ip6_fib_counters (sm);
2508 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2509 do_ip4_mfib_counters (sm);
2511 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2512 do_ip6_mfib_counters (sm);
2514 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2515 do_ip4_nbr_counters (sm);
2517 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2518 do_ip6_nbr_counters (sm);
2520 if (pool_elts (sm->stats_registrations[IDX_BIER_NEIGHBOR_COUNTERS]))
2521 do_bier_neighbor_counters (sm);
/*
 * Fan a VNET_INTERFACE_SIMPLE_COUNTERS message out to every client
 * registered for per-interface simple stats.  Pattern: for each
 * client, if a previous sendable registration exists, send it a COPY
 * and keep the original for the next; the last reachable client gets
 * the original message; if no client could take it, free it.
 * Unreachable clients are deregistered (clear_client_for_stat).
 * NOTE(review): fragmentary listing -- the reg==NULL branch and the
 * mp=mp_copy hand-off are in the numbering gaps.
 */
2526 vl_api_vnet_interface_simple_counters_t_handler
2527 (vl_api_vnet_interface_simple_counters_t * mp)
2529 vpe_client_registration_t *clients, client;
2530 stats_main_t *sm = &stats_main;
2531 vl_api_registration_t *reg, *reg_prev = NULL;
2532 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2536 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2539 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2540 ~0 /*flag for all */ );
2542 for (i = 0; i < vec_len (clients); i++)
2544 client = clients[i];
2545 reg = vl_api_client_index_to_registration (client.client_index);
2548 if (reg_prev && vl_api_can_send_msg (reg_prev))
2550 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2551 clib_memcpy (mp_copy, mp, mp_size);
2552 vl_api_send_msg (reg_prev, (u8 *) mp);
2560 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2561 client.client_index);
2568 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2571 if (reg_prev && vl_api_can_send_msg (reg_prev))
2573 vl_api_send_msg (reg_prev, (u8 *) mp);
2577 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP4_FIB_COUNTERS message out to all clients registered
 * for IP4 FIB stats: copy-per-intermediate-client, original to the
 * last reachable client, free if none.  Unreachable clients are
 * deregistered and sm->enable_poller is recomputed from the result.
 * NOTE(review): fragmentary listing -- the reg==NULL branch and the
 * mp=mp_copy hand-off are in the numbering gaps.
 */
2582 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2584 stats_main_t *sm = &stats_main;
2585 vl_api_registration_t *reg, *reg_prev = NULL;
2586 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2588 vpe_client_registration_t *clients, client;
2591 mp_size = sizeof (*mp_copy) +
2592 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2595 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2597 for (i = 0; i < vec_len (clients); i++)
2599 client = clients[i];
2600 reg = vl_api_client_index_to_registration (client.client_index);
2603 if (reg_prev && vl_api_can_send_msg (reg_prev))
2605 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2606 clib_memcpy (mp_copy, mp, mp_size);
2607 vl_api_send_msg (reg_prev, (u8 *) mp);
2614 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2615 ~0, client.client_index);
2621 if (reg_prev && vl_api_can_send_msg (reg_prev))
2623 vl_api_send_msg (reg_prev, (u8 *) mp);
2627 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP4_NBR_COUNTERS message out to all clients registered
 * for IP4 neighbour stats; same copy/last-client/free pattern and
 * deregistration of unreachable clients as the FIB handler above.
 * NOTE(review): fragmentary listing -- the reg==NULL branch and the
 * mp=mp_copy hand-off are in the numbering gaps.
 */
2632 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2634 stats_main_t *sm = &stats_main;
2635 vl_api_registration_t *reg, *reg_prev = NULL;
2636 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2638 vpe_client_registration_t *clients, client;
2641 mp_size = sizeof (*mp_copy) +
2642 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2645 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2647 for (i = 0; i < vec_len (clients); i++)
2649 client = clients[i];
2650 reg = vl_api_client_index_to_registration (client.client_index);
2653 if (reg_prev && vl_api_can_send_msg (reg_prev))
2655 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2656 clib_memcpy (mp_copy, mp, mp_size);
2657 vl_api_send_msg (reg_prev, (u8 *) mp);
2664 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2665 ~0, client.client_index);
2672 if (reg_prev && vl_api_can_send_msg (reg_prev))
2674 vl_api_send_msg (reg_prev, (u8 *) mp);
2678 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP6_FIB_COUNTERS message out to all clients registered
 * for IP6 FIB stats; same copy/last-client/free pattern and
 * deregistration of unreachable clients as the IP4 handler.
 * NOTE(review): fragmentary listing -- the reg==NULL branch and the
 * mp=mp_copy hand-off are in the numbering gaps.
 */
2683 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2685 stats_main_t *sm = &stats_main;
2686 vl_api_registration_t *reg, *reg_prev = NULL;
2687 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2689 vpe_client_registration_t *clients, client;
2692 mp_size = sizeof (*mp_copy) +
2693 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2696 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2698 for (i = 0; i < vec_len (clients); i++)
2700 client = clients[i];
2701 reg = vl_api_client_index_to_registration (client.client_index);
2704 if (reg_prev && vl_api_can_send_msg (reg_prev))
2706 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2707 clib_memcpy (mp_copy, mp, mp_size);
2708 vl_api_send_msg (reg_prev, (u8 *) mp);
2715 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2716 ~0, client.client_index);
2723 if (reg_prev && vl_api_can_send_msg (reg_prev))
2725 vl_api_send_msg (reg_prev, (u8 *) mp);
2729 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP6_NBR_COUNTERS message out to all clients registered
 * for IP6 neighbour stats; same copy/last-client/free pattern and
 * deregistration of unreachable clients as the other fan-out handlers.
 * NOTE(review): fragmentary listing -- the reg==NULL branch and the
 * mp=mp_copy hand-off are in the numbering gaps.
 */
2734 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2736 stats_main_t *sm = &stats_main;
2737 vl_api_registration_t *reg, *reg_prev = NULL;
2738 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2740 vpe_client_registration_t *clients, client;
2743 mp_size = sizeof (*mp_copy) +
2744 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2747 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2749 for (i = 0; i < vec_len (clients); i++)
2751 client = clients[i];
2752 reg = vl_api_client_index_to_registration (client.client_index);
2755 if (reg_prev && vl_api_can_send_msg (reg_prev))
2757 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2758 clib_memcpy (mp_copy, mp, mp_size);
2759 vl_api_send_msg (reg_prev, (u8 *) mp);
2766 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2767 ~0, client.client_index);
2774 if (reg_prev && vl_api_can_send_msg (reg_prev))
2776 vl_api_send_msg (reg_prev, (u8 *) mp);
2780 vl_msg_api_free (mp);
/*
 * Handler for WANT_UDP_ENCAP_STATS: (de)register the client for
 * UDP-encap counters (fib==~0 reuses the per-interface "all" slot),
 * recompute enable_poller for the disable path, and send a reply
 * carrying the registration result.
 * NOTE(review): fragmentary listing -- retval assignment from
 * handle_client_registration, the enable/disable branch and the
 * reg==NULL early-out are in the numbering gaps.
 */
2785 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2787 stats_main_t *sm = &stats_main;
2788 vpe_client_registration_t rp;
2789 vl_api_want_udp_encap_stats_reply_t *rmp;
2792 vl_api_registration_t *reg;
2795 fib = ~0; //Using same mechanism as _per_interface_
2796 rp.client_index = mp->client_index;
2797 rp.client_pid = mp->pid;
2799 handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2802 reg = vl_api_client_index_to_registration (mp->client_index);
2806 sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2807 fib, mp->client_index);
2811 rmp = vl_msg_api_alloc (sizeof (*rmp));
2812 rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2813 rmp->context = mp->context;
2814 rmp->retval = retval;
2816 vl_api_send_msg (reg, (u8 *) rmp);
/* Handler for WANT_BIER_NEIGHBOR_STATS: (de)register the calling client
 * for periodic BIER neighbour counter updates and reply with the caller's
 * context.  Mirrors the udp-encap handler above.
 * NOTE(review): listing omits structural lines; code left byte-identical. */
2820 vl_api_want_bier_neighbor_stats_t_handler (vl_api_want_bier_neighbor_stats_t *
2823   stats_main_t *sm = &stats_main;
2824   vpe_client_registration_t rp;
2825   vl_api_want_bier_neighbor_stats_reply_t *rmp;
2828   vl_api_registration_t *reg;
/* ~0 = subscribe to all items, reusing the per-interface mechanism. */
2831   fib = ~0; //Using same mechanism as _per_interface_
2832   rp.client_index = mp->client_index;
2833   rp.client_pid = mp->pid;
2835   handle_client_registration (&rp, IDX_BIER_NEIGHBOR_COUNTERS, fib,
2839   reg = vl_api_client_index_to_registration (mp->client_index);
/* Stale client: clear its subscription and recompute poller state. */
2843   sm->enable_poller = clear_client_for_stat (IDX_BIER_NEIGHBOR_COUNTERS,
2844                                              fib, mp->client_index);
2848   rmp = vl_msg_api_alloc (sizeof (*rmp));
2849   rmp->_vl_msg_id = ntohs (VL_API_WANT_BIER_NEIGHBOR_STATS_REPLY);
2850   rmp->context = mp->context;
2851   rmp->retval = retval;
2853   vl_api_send_msg (reg, (u8 *) rmp);
/* Legacy WANT_STATS handler: one registration call per counter category —
 * (de)registers the client for ALL six classic stat streams at once
 * (simple + combined interface counters, ip4/ip6 FIB and NBR counters).
 * NOTE(review): listing omits structural lines; code left byte-identical. */
2857 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2859   stats_main_t *sm = &stats_main;
2860   vpe_client_registration_t rp;
2861   vl_api_want_stats_reply_t *rmp;
2865   vl_api_registration_t *reg;
/* item = ~0 means "every interface / every table" for each category. */
2867   item = ~0; //"ALL THE THINGS IN THE THINGS
2868   rp.client_index = mp->client_index;
2869   rp.client_pid = mp->pid;
2871   handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2872                               item, mp->enable_disable);
2874   handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2875                               item, mp->enable_disable);
2877   handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2878                               item, mp->enable_disable);
2880   handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2881                               item, mp->enable_disable);
2883   handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2884                               item, mp->enable_disable);
2886   handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2887                               item, mp->enable_disable);
2890   reg = vl_api_client_index_to_registration (mp->client_index);
/* Reply with the caller's context; retval declared on an omitted line. */
2894   rmp = vl_msg_api_alloc (sizeof (*rmp));
2895   rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2896   rmp->context = mp->context;
2897   rmp->retval = retval;
2899   vl_api_send_msg (reg, (u8 *) rmp);
/* Handler for WANT_INTERFACE_SIMPLE_STATS: (de)register the client for
 * simple (packet-only) interface counters on all interfaces, then reply.
 * NOTE(review): listing omits structural lines; code left byte-identical. */
2903 vl_api_want_interface_simple_stats_t_handler
2904   (vl_api_want_interface_simple_stats_t * mp)
2906   stats_main_t *sm = &stats_main;
2907   vpe_client_registration_t rp;
2908   vl_api_want_interface_simple_stats_reply_t *rmp;
2912   vl_api_registration_t *reg;
/* swif = ~0 means "all interfaces", same flag as the per-interface API. */
2914   swif = ~0; //Using same mechanism as _per_interface_
2915   rp.client_index = mp->client_index;
2916   rp.client_pid = mp->pid;
2918   handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2919                               mp->enable_disable);
2922   reg = vl_api_client_index_to_registration (mp->client_index);
/* Stale client: drop its subscription (the `sm->enable_poller =` left-hand
 * side presumably sits on an omitted line — TODO confirm vs. original). */
2927   clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2932   rmp = vl_msg_api_alloc (sizeof (*rmp));
2933   rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2934   rmp->context = mp->context;
2935   rmp->retval = retval;
2937   vl_api_send_msg (reg, (u8 *) rmp);
/* Handler for WANT_IP4_FIB_STATS: (de)register the client for IPv4 FIB
 * counter updates across all FIB tables, then reply with the context.
 * NOTE(review): listing omits structural lines; code left byte-identical. */
2942 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2944   stats_main_t *sm = &stats_main;
2945   vpe_client_registration_t rp;
2946   vl_api_want_ip4_fib_stats_reply_t *rmp;
2949   vl_api_registration_t *reg;
/* fib = ~0 => all FIB tables. */
2952   fib = ~0; //Using same mechanism as _per_interface_
2953   rp.client_index = mp->client_index;
2954   rp.client_pid = mp->pid;
2956   handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2957                               mp->enable_disable);
2960   reg = vl_api_client_index_to_registration (mp->client_index);
/* Stale client: clear subscription and recompute poller state. */
2964   sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2965                                              fib, mp->client_index);
2969   rmp = vl_msg_api_alloc (sizeof (*rmp));
2970   rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2971   rmp->context = mp->context;
2972   rmp->retval = retval;
2974   vl_api_send_msg (reg, (u8 *) rmp);
/* Handler for WANT_IP4_MFIB_STATS: (de)register the client for IPv4
 * multicast-FIB counter updates across all mfib tables, then reply.
 * NOTE(review): listing omits structural lines; code left byte-identical. */
2978 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2980   stats_main_t *sm = &stats_main;
2981   vpe_client_registration_t rp;
2982   vl_api_want_ip4_mfib_stats_reply_t *rmp;
2985   vl_api_registration_t *reg;
/* mfib = ~0 => all multicast FIB tables. */
2988   mfib = ~0; //Using same mechanism as _per_interface_
2989   rp.client_index = mp->client_index;
2990   rp.client_pid = mp->pid;
2992   handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2993                               mp->enable_disable);
2996   reg = vl_api_client_index_to_registration (mp->client_index);
/* Stale client: clear subscription and recompute poller state. */
2999   sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
3000                                              mfib, mp->client_index);
3004   rmp = vl_msg_api_alloc (sizeof (*rmp));
3005   rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
3006   rmp->context = mp->context;
3007   rmp->retval = retval;
3009   vl_api_send_msg (reg, (u8 *) rmp);
3013 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
3015 stats_main_t *sm = &stats_main;
3016 vpe_client_registration_t rp;
3017 vl_api_want_ip4_fib_stats_reply_t *rmp;
3020 vl_api_registration_t *reg;
3023 fib = ~0; //Using same mechanism as _per_interface_
3024 rp.client_index = mp->client_index;
3025 rp.client_pid = mp->pid;
3027 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
3028 mp->enable_disable);
3031 reg = vl_api_client_index_to_registration (mp->client_index);
3034 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
3035 fib, mp->client_index);
3039 rmp = vl_msg_api_alloc (sizeof (*rmp));
3040 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
3041 rmp->context = mp->context;
3042 rmp->retval = retval;
3044 vl_api_send_msg (reg, (u8 *) rmp);
3048 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
3050 stats_main_t *sm = &stats_main;
3051 vpe_client_registration_t rp;
3052 vl_api_want_ip4_mfib_stats_reply_t *rmp;
3055 vl_api_registration_t *reg;
3058 mfib = ~0; //Using same mechanism as _per_interface_
3059 rp.client_index = mp->client_index;
3060 rp.client_pid = mp->pid;
3062 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
3063 mp->enable_disable);
3066 reg = vl_api_client_index_to_registration (mp->client_index);
3069 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
3070 mfib, mp->client_index);
3074 rmp = vl_msg_api_alloc (sizeof (*rmp));
3075 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
3076 rmp->context = mp->context;
3077 rmp->retval = retval;
3079 vl_api_send_msg (reg, (u8 *) rmp);
3082 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/* Deliberate no-op stub: neighbour stats registration disabled pending
 * the fix referenced above (bodies are empty on omitted lines). */
3084 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
/* Deliberate no-op stub, same reason as the ip4 variant above. */
3089 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/* Handler for VNET_GET_SUMMARY_STATS: sums every combined interface
 * counter (rx/tx packets and bytes) across all interfaces under the
 * interface-counter lock and replies with per-direction totals in
 * network byte order, plus the last vector length per node.
 * NOTE(review): listing omits structural lines (braces, `which`/`v`/`i`
 * declarations, the !reg early return); code left byte-identical. */
3094 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
3096   stats_main_t *sm = &stats_main;
3097   vnet_interface_main_t *im = sm->interface_main;
3098   vl_api_vnet_get_summary_stats_reply_t *rmp;
3099   vlib_combined_counter_main_t *cm;
3101   vnet_interface_counter_type_t ct;
3103   u64 total_pkts[VNET_N_COMBINED_INTERFACE_COUNTER];
3104   u64 total_bytes[VNET_N_COMBINED_INTERFACE_COUNTER];
3105   vl_api_registration_t *reg;
3107   reg = vl_api_client_index_to_registration (mp->client_index);
3111   rmp = vl_msg_api_alloc (sizeof (*rmp));
3112   rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
3113   rmp->context = mp->context;
/* Zero the accumulators before summing. */
3116   memset (total_pkts, 0, sizeof (total_pkts));
3117   memset (total_bytes, 0, sizeof (total_bytes));
/* Hold the counter lock so concurrent writers can't tear the reads. */
3119   vnet_interface_counter_lock (im);
3121   vec_foreach (cm, im->combined_sw_if_counters)
/* `which` indexes the counter type (rx/tx) by vector position. */
3123       which = cm - im->combined_sw_if_counters;
3125       for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
3127           vlib_get_combined_counter (cm, i, &v);
3128           total_pkts[which] += v.packets;
3129           total_bytes[which] += v.bytes;
3132   vnet_interface_counter_unlock (im);
/* Copy the totals into the reply, host-to-network converted. */
3134   foreach_rx_combined_interface_counter (ct)
3136     rmp->total_pkts[ct] = clib_host_to_net_u64 (total_pkts[ct]);
3137     rmp->total_bytes[ct] = clib_host_to_net_u64 (total_bytes[ct]);
3140   foreach_tx_combined_interface_counter (ct)
3142     rmp->total_pkts[ct] = clib_host_to_net_u64 (total_pkts[ct]);
3143     rmp->total_bytes[ct] = clib_host_to_net_u64 (total_bytes[ct]);
3146     clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
3148   vl_api_send_msg (reg, (u8 *) rmp);
/* Callback invoked when an API memory client disconnects.  The cleanup
 * of that client's stat registrations is currently commented out —
 * presumably superseded by clear_client_for_stat(); TODO confirm. */
3152 stats_memclnt_delete_callback (u32 client_index)
3154   vpe_client_stats_registration_t *rp;
3155   stats_main_t *sm = &stats_main;
3159   /* p = hash_get (sm->stats_registration_hash, client_index); */
3162   /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
3163   /* pool_put (sm->stats_registrations, rp); */
3164   /* hash_unset (sm->stats_registration_hash, client_index); */
/* Suppress the generated endian-swap and print handlers for the bulk
 * counter messages: they are forwarded to clients verbatim (see the
 * fan-out handlers above), so per-field conversion here is unwanted. */
3170 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
3171 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
3172 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
3173 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
3174 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
3175 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
3176 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
3177 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
3178 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
3179 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
3180 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
3181 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
3183 static clib_error_t *
/* One-time init: wires up message handlers for foreach_stats_msg,
 * marks the bulk counter messages as no-free, and sizes the per-stat
 * registration pools/hashes from stats.reg.
 * NOTE(review): listing omits some lines (the foreach_stats_msg
 * invocation, #undef stats_reg, return); code left byte-identical. */
3184 stats_init (vlib_main_t * vm)
3186   stats_main_t *sm = &stats_main;
3187   api_main_t *am = &api_main;
3188   void *vlib_worker_thread_bootstrap_fn (void *arg);
3191   sm->vnet_main = vnet_get_main ();
3192   sm->interface_main = &vnet_get_main ()->interface_main;
/* Default polling cadence for the stats thread. */
3194   sm->stats_poll_interval_in_seconds = 10;
/* Cache-line-aligned lock protecting shared stats data structures. */
3195   sm->data_structure_lock =
3196     clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
3197                             CLIB_CACHE_LINE_BYTES);
3198   memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Expansion of _(N, n) from foreach_stats_msg: register each handler. */
3201   vl_msg_api_set_handlers(VL_API_##N, #n, \
3202                           vl_api_##n##_t_handler, \
3204                           vl_api_##n##_t_endian, \
3205                           vl_api_##n##_t_print, \
3206                           sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
3210   /* tell the msg infra not to free these messages... */
3211   am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
3212   am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
3213   am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
3214   am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
3215   am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
3216   am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
3219    * Set up the (msg_name, crc, message-id) table
3221   setup_message_id_table (am);
/* One registration vector + hash slot per stat index from stats.reg. */
3223   vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
3224   vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
3225 #define stats_reg(n) \
3226   sm->stats_registrations[IDX_##n] = 0; \
3227   sm->stats_registration_hash[IDX_##n] = 0;
3228 #include <vpp/stats/stats.reg>
3234 VLIB_INIT_FUNCTION (stats_init);
/* Register the dedicated stats-collection thread; remaining initializer
 * fields and closing brace are on omitted lines of this listing. */
3237 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
3239   .function = stats_thread_fn,
/* Stats thread keeps its own data structures; do not clone per-worker. */
3242   .no_data_structure_clone = 1,
3248 * fd.io coding-style-patch-verification: ON
3251 * eval: (c-set-style "gnu")