2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
25 stats_main_t stats_main;
27 #include <vnet/ip/ip.h>
29 #include <vpp/api/vpe_msg_enum.h>
32 #define f64_print(a,b)
34 #define vl_typedefs /* define message structures */
35 #include <vpp/api/vpe_all_api_h.h>
38 #define vl_endianfun /* define message structures */
39 #include <vpp/api/vpe_all_api_h.h>
42 /* instantiate all the print functions we know about */
43 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
45 #include <vpp/api/vpe_all_api_h.h>
48 #define foreach_stats_msg \
49 _(WANT_STATS, want_stats) \
50 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
51 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
52 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
53 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
54 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
55 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
56 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
57 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
58 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
59 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
60 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
61 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
62 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
63 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
64 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
65 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
66 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
67 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
68 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats) \
69 _(MAP_STATS_SEGMENT, map_stats_segment)
71 #define vl_msg_name_crc_list
72 #include <vpp/stats/stats.api.h>
73 #undef vl_msg_name_crc_list
/* Register the name_crc string of every stats API message with the API
 * message table, so binary-API clients can resolve message ids at runtime.
 * NOTE(review): the surrounding macro scaffolding (the #define _(id,n,crc)
 * wrapper and the function braces) is elided in this copy of the file. */
76 setup_message_id_table (api_main_t * am)
79 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
80 foreach_vl_msg_name_crc_stats;
84 /* These constants ensure msg sizes <= 1024, aka ring allocation */
85 #define SIMPLE_COUNTER_BATCH_SIZE 126
86 #define COMBINED_COUNTER_BATCH_SIZE 63
87 #define IP4_FIB_COUNTER_BATCH_SIZE 48
88 #define IP6_FIB_COUNTER_BATCH_SIZE 30
89 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
90 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
91 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
94 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* format() helper: pretty-print a vl_api_vnet_interface_combined_counters_t
 * message as "<if-name>.<counter>.packets N" / "<if-name>.<counter>.bytes N"
 * lines, one pair per interface starting at first_sw_if_index.
 * NOTE(review): several lines (rx/tx counter_name assignments, braces,
 * vp++/sw_if_index++ advancement) are elided in this copy. */
98 format_vnet_interface_combined_counters (u8 * s, va_list * args)
100 stats_main_t *sm = &stats_main;
101 vl_api_vnet_interface_combined_counters_t *mp =
102 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
105 u32 count, sw_if_index;
/* Fields arrive in network byte order from the worker side. */
107 count = ntohl (mp->count);
108 sw_if_index = ntohl (mp->first_sw_if_index);
112 vp = (vlib_counter_t *) mp->data;
/* Map the wire counter-type code to a printable name. */
114 switch (mp->vnet_counter_type)
116 case VNET_INTERFACE_COUNTER_RX:
119 case VNET_INTERFACE_COUNTER_TX:
/* Default case: unknown counter type. */
123 counter_name = "bogus";
126 for (i = 0; i < count; i++)
/* Counter payload is unaligned in the message; read it safely and
 * convert from network to host byte order. */
128 packets = clib_mem_unaligned (&vp->packets, u64);
129 packets = clib_net_to_host_u64 (packets);
130 bytes = clib_mem_unaligned (&vp->bytes, u64);
131 bytes = clib_net_to_host_u64 (bytes);
133 s = format (s, "%U.%s.packets %lld\n",
134 format_vnet_sw_if_index_name,
135 sm->vnet_main, sw_if_index, counter_name, packets);
136 s = format (s, "%U.%s.bytes %lld\n",
137 format_vnet_sw_if_index_name,
138 sm->vnet_main, sw_if_index, counter_name, bytes);
/* format() helper: pretty-print a vl_api_vnet_interface_simple_counters_t
 * message as "<if-name>.<counter> N" lines, one per interface starting at
 * first_sw_if_index.
 * NOTE(review): break statements, braces and the vp++/sw_if_index++
 * advancement are elided in this copy. */
145 format_vnet_interface_simple_counters (u8 * s, va_list * args)
147 stats_main_t *sm = &stats_main;
148 vl_api_vnet_interface_simple_counters_t *mp =
149 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
151 u32 count, sw_if_index;
/* Fields arrive in network byte order. */
152 count = ntohl (mp->count);
153 sw_if_index = ntohl (mp->first_sw_if_index);
155 vp = (u64 *) mp->data;
/* Map the wire counter-type code to a printable name. */
158 switch (mp->vnet_counter_type)
160 case VNET_INTERFACE_COUNTER_DROP:
161 counter_name = "drop";
163 case VNET_INTERFACE_COUNTER_PUNT:
164 counter_name = "punt";
166 case VNET_INTERFACE_COUNTER_IP4:
167 counter_name = "ip4";
169 case VNET_INTERFACE_COUNTER_IP6:
170 counter_name = "ip6";
172 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
173 counter_name = "rx-no-buff";
175 case VNET_INTERFACE_COUNTER_RX_MISS:
176 counter_name = "rx-miss";
178 case VNET_INTERFACE_COUNTER_RX_ERROR:
179 counter_name = "rx-error (fifo-full)";
181 case VNET_INTERFACE_COUNTER_TX_ERROR:
182 counter_name = "tx-error (fifo-full)";
/* Default case: unknown counter type. */
185 counter_name = "bogus";
188 for (i = 0; i < count; i++)
/* Unaligned read followed by network-to-host conversion. */
190 v = clib_mem_unaligned (vp, u64);
191 v = clib_net_to_host_u64 (v);
193 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
194 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the global stats data-structure lock via a test-and-set spin.
 * If this thread already holds the lock, the (elided) branch after the
 * recursion check presumably returns early -- TODO confirm against the
 * full source. release_hint/tag appear to be bookkeeping inputs; their
 * uses are in elided lines. */
202 dslock (stats_main_t * sm, int release_hint, int tag)
205 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock not yet initialised: nothing to take (early-out path elided). */
207 if (PREDICT_FALSE (l == 0))
210 thread_index = vlib_get_thread_index ();
/* Recursion guard: calling thread already owns the lock. */
211 if (l->lock && l->thread_index == thread_index)
/* Spin until the atomic test-and-set observes the lock free. */
220 while (__sync_lock_test_and_set (&l->lock, 1))
/* Record ownership for the recursion guard / dsunlock assertion. */
223 l->thread_index = thread_index;
/* Public wrapper: take the stats data-structure lock on the global
 * stats_main instance. */
228 stats_dslock_with_hint (int hint, int tag)
230 stats_main_t *sm = &stats_main;
231 dslock (sm, hint, tag);
/* Release the global stats data-structure lock.  Asserts the caller is
 * the owning thread, then (in elided lines) clears the lock word after
 * the memory barrier. */
235 dsunlock (stats_main_t * sm)
238 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock not initialised: nothing to release (early-out path elided). */
240 if (PREDICT_FALSE (l == 0))
243 thread_index = vlib_get_thread_index ();
/* Only the thread that took the lock may release it. */
244 ASSERT (l->lock && l->thread_index == thread_index);
/* Ensure protected writes are visible before the lock is dropped. */
250 CLIB_MEMORY_BARRIER ();
/* Public wrapper for releasing the stats data-structure lock.
 * NOTE(review): the body (presumably a dsunlock (sm) call) is elided in
 * this copy; hint/tag appear unused here. */
256 stats_dsunlock (int hint, int tag)
258 stats_main_t *sm = &stats_main;
/* Look up the per-client registration for (reg, item, client_index).
 * Returns a pointer into the registration's client pool, or (in elided
 * lines) presumably NULL when either hash lookup misses -- TODO confirm. */
262 static vpe_client_registration_t *
263 get_client_for_stat (u32 reg, u32 item, u32 client_index)
265 stats_main_t *sm = &stats_main;
266 vpe_client_stats_registration_t *registration;
269 /* Is there anything listening for item in that reg */
270 p = hash_get (sm->stats_registration_hash[reg], item);
275 /* If there is, is our client_index one of them */
276 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
277 p = hash_get (registration->client_hash, client_index);
282 return pool_elt_at_index (registration->clients, p[0]);
/* Register a client as interested in stat (reg, item).  Creates the
 * per-item registration on first use, then adds the client to its pool
 * and hash (the duplicate-client early-out branch is elided).  Always
 * returns 1 so the caller enables the poller. */
287 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
289 stats_main_t *sm = &stats_main;
290 vpe_client_stats_registration_t *registration;
291 vpe_client_registration_t *cr;
294 /* Is there anything listening for item in that reg */
295 p = hash_get (sm->stats_registration_hash[reg], item);
/* First listener for this item: create the registration and index it. */
299 pool_get (sm->stats_registrations[reg], registration);
300 registration->item = item;
301 hash_set (sm->stats_registration_hash[reg], item,
302 registration - sm->stats_registrations[reg]);
/* Item already registered: reuse the existing registration. */
306 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
309 p = hash_get (registration->client_hash, client->client_index);
/* Client not yet attached to this item: add it to the pool and hash. */
313 pool_get (registration->clients, cr);
314 cr->client_index = client->client_index;
315 cr->client_pid = client->client_pid;
316 hash_set (registration->client_hash, cr->client_index,
317 cr - registration->clients);
320 return 1; //At least one client is doing something ... poll
/* Remove a client's interest in stat (reg, item).  Frees the per-item
 * registration when its last client goes away, then counts remaining
 * registrations across all stat types (elided lines presumably return
 * whether the poller should stay enabled -- TODO confirm). */
324 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
326 stats_main_t *sm = &stats_main;
327 vpe_client_stats_registration_t *registration;
328 vpe_client_registration_t *client;
332 /* Clear the client first */
333 /* Is there anything listening for item in that reg */
334 p = hash_get (sm->stats_registration_hash[reg], item);
339 /* If there is, is our client_index one of them */
340 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
341 p = hash_get (registration->client_hash, client_index);
/* Detach the client from this item's registration. */
346 client = pool_elt_at_index (registration->clients, p[0]);
347 hash_unset (registration->client_hash, client->client_index);
348 pool_put (registration->clients, client);
350 /* Now check if that was the last client for that item */
351 if (0 == pool_elts (registration->clients))
353 hash_unset (sm->stats_registration_hash[reg], item);
354 pool_put (sm->stats_registrations[reg], registration);
359 /* Now check if that was the last item in any of the listened to stats */
360 for (i = 0; i < STATS_REG_N_IDX; i++)
362 elts += pool_elts (sm->stats_registrations[i]);
368 * Return a copy of the clients list.
/* Build and return a vector copy of all clients registered for
 * (reg, item).  Caller owns (and must vec_free) the returned vector.
 * Returns the (elided) 'clients' accumulator; presumably NULL/empty when
 * nothing is registered -- TODO confirm. */
370 vpe_client_registration_t *
371 get_clients_for_stat (u32 reg, u32 item)
373 stats_main_t *sm = &stats_main;
374 vpe_client_registration_t *client, *clients = 0;
375 vpe_client_stats_registration_t *registration;
378 /* Is there anything listening for item in that reg */
379 p = hash_get (sm->stats_registration_hash[reg], item);
384 /* If there is, is our client_index one of them */
385 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
387 vec_reset_length (clients);
/* Snapshot every registered client by value into the result vector. */
390 pool_foreach (client, registration->clients,
392 vec_add1 (clients, *client);}
/* Free the per-stat-type registration vectors (one per stats_reg entry
 * generated from stats.reg), then free the outer vector itself. */
400 clear_client_reg (u32 ** registrations)
402 /* When registrations[x] is a vector of pool indices
403 here is a good place to clean up the pools
405 #define stats_reg(n) vec_free(registrations[IDX_##n]);
406 #include <vpp/stats/stats.reg>
409 vec_free (registrations);
/* Allocate/initialise the per-stat-type registration vectors, one slot
 * per IDX_* generated from stats.reg.  Returns the (re)validated vector. */
413 init_client_reg (u32 ** registrations)
417 Initialise the stats registrations for each
418 type of stat a client can register for as well as
419 a vector of "interested" indexes.
420 Initially this is a u32 of either sw_if_index or fib_index
421 but eventually this should migrate to a pool_index (u32)
422 with a type specific pool that can include more complex things
423 such as timing and structured events.
425 vec_validate (registrations, STATS_REG_N_IDX);
426 #define stats_reg(n) \
427 vec_reset_length(registrations[IDX_##n]);
428 #include <vpp/stats/stats.reg>
432 When registrations[x] is a vector of pool indices, here
433 is a good place to init the pools.
435 return registrations;
/* Enable every known stat type by appending the ~0 "all items" wildcard
 * to each per-type registration vector.  Returns the updated vector. */
439 enable_all_client_reg (u32 ** registrations)
443 Enable all stats known by adding
444 ~0 to the index vector. Eventually this
445 should be deprecated.
447 #define stats_reg(n) \
448 vec_add1(registrations[IDX_##n], ~0);
449 #include <vpp/stats/stats.reg>
451 return registrations;
/* Collect every simple (single-u64) interface counter and ship them to
 * the main thread as VNET_INTERFACE_SIMPLE_COUNTERS messages, batched at
 * SIMPLE_COUNTER_BATCH_SIZE entries so each message fits the API ring.
 * NOTE(review): the per-item store/advance lines and the final partial
 * batch flush are elided in this copy. */
455 do_simple_interface_counters (stats_main_t * sm)
457 vl_api_vnet_interface_simple_counters_t *mp = 0;
458 vnet_interface_main_t *im = sm->interface_main;
459 api_main_t *am = sm->api_main;
460 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
461 svm_queue_t *q = shmem_hdr->vl_input_queue;
462 vlib_simple_counter_main_t *cm;
463 u32 items_this_message = 0;
468 * Prevent interface registration from expanding / moving the vectors...
469 * That tends never to happen, so we can hold this lock for a while.
471 vnet_interface_counter_lock (im);
/* One pass per simple-counter type (drop, punt, ip4, ...). */
473 vec_foreach (cm, im->sw_if_counters)
475 n_counts = vlib_simple_counter_n_counters (cm);
476 for (i = 0; i < n_counts; i++)
/* Start a new batch: cap it so the message stays <= ring size. */
480 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
483 mp = vl_msg_api_alloc_as_if_client
484 (sizeof (*mp) + items_this_message * sizeof (v));
485 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
/* Counter type is the index of cm within the sw_if_counters vector. */
486 mp->vnet_counter_type = cm - im->sw_if_counters;
487 mp->first_sw_if_index = htonl (i);
489 vp = (u64 *) mp->data;
/* Store each counter unaligned, in network byte order. */
491 v = vlib_get_simple_counter (cm, i);
492 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
495 if (mp->count == items_this_message)
497 mp->count = htonl (items_this_message);
498 /* Send to the main thread... */
499 vl_msg_api_send_shmem (q, (u8 *) & mp);
505 vnet_interface_counter_unlock (im);
/* Enable or disable a client's registration for (stat, item).
 * enable_disable == 0 clears an existing registration (warning if there
 * is none); otherwise the client is (re)registered and the poller is
 * enabled.  The early-return / "already enabled" branches are elided. */
509 handle_client_registration (vpe_client_registration_t * client, u32 stat,
510 u32 item, int enable_disable)
512 stats_main_t *sm = &stats_main;
513 vpe_client_registration_t *rp, _rp;
515 rp = get_client_for_stat (stat, item, client->client_index);
518 if (enable_disable == 0)
520 if (!rp) // No client to disable
522 clib_warning ("pid %d: already disabled for stats...",
527 clear_client_for_stat (stat, item, client->client_index);
/* Enable path: populate the stack registration and attach it. */
534 rp->client_index = client->client_index;
535 rp->client_pid = client->client_pid;
536 sm->enable_poller = set_client_for_stat (stat, item, rp);
541 /**********************************
542 * ALL Interface Combined stats - to be deprecated
543 **********************************/
546 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/* Handler for WANT_INTERFACE_COMBINED_STATS: legacy "all interfaces"
 * registration, implemented as a per-interface registration with
 * sw_if_index ~0.  Replies with a retval-bearing message; if the client
 * registration has vanished, its stat interest is cleaned up instead
 * (branch structure partially elided in this copy). */
549 vl_api_want_interface_combined_stats_t_handler
550 (vl_api_want_interface_combined_stats_t * mp)
552 stats_main_t *sm = &stats_main;
553 vpe_client_registration_t rp;
554 vl_api_want_interface_combined_stats_reply_t *rmp;
557 vl_api_registration_t *reg;
560 swif = ~0; //Using same mechanism as _per_interface_
561 rp.client_index = mp->client_index;
562 rp.client_pid = mp->pid;
564 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
568 reg = vl_api_client_index_to_registration (mp->client_index);
/* Client disappeared: drop its registration so the poller can idle. */
572 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
577 rmp = vl_msg_api_alloc (sizeof (*rmp));
578 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
579 rmp->context = mp->context;
580 rmp->retval = retval;
582 vl_api_send_msg (reg, (u8 *) rmp);
/* Main-thread handler for VNET_INTERFACE_COMBINED_COUNTERS: fan the
 * message out to every registered client.  The original message buffer
 * is forwarded to the last reachable client; all earlier clients get a
 * freshly allocated copy (the mp = mp_copy swap line is elided).  If no
 * client accepts it, the message is freed. */
586 vl_api_vnet_interface_combined_counters_t_handler
587 (vl_api_vnet_interface_combined_counters_t * mp)
589 vpe_client_registration_t *clients, client;
590 stats_main_t *sm = &stats_main;
591 vl_api_registration_t *reg, *reg_prev = NULL;
592 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
/* Total wire size: header plus count combined counters. */
596 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
/* ~0 is the "all interfaces" wildcard registration. */
599 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
600 ~0 /*flag for all */ );
602 for (i = 0; i < vec_len (clients); i++)
605 reg = vl_api_client_index_to_registration (client.client_index);
/* A previous client is still pending: give it a copy, keep mp for
 * the next one. */
608 if (reg_prev && vl_api_can_send_msg (reg_prev))
610 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
611 clib_memcpy (mp_copy, mp, mp_size);
612 vl_api_send_msg (reg_prev, (u8 *) mp);
/* Debug trace of the counters (guard #if elided in this copy). */
620 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
/* Ship the original buffer to the last reachable client... */
623 if (reg_prev && vl_api_can_send_msg (reg_prev))
625 vl_api_send_msg (reg_prev, (u8 *) mp);
/* ...or free it if nobody could take it. */
629 vl_msg_api_free (mp);
/* Collect every combined (packets+bytes) interface counter and ship them
 * to the main thread as VNET_INTERFACE_COMBINED_COUNTERS messages,
 * batched at COMBINED_COUNTER_BATCH_SIZE entries per message.
 * NOTE(review): the vp++ advancement and final partial-batch flush are
 * elided in this copy. */
634 do_combined_interface_counters (stats_main_t * sm)
636 vl_api_vnet_interface_combined_counters_t *mp = 0;
637 vnet_interface_main_t *im = sm->interface_main;
638 api_main_t *am = sm->api_main;
639 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
640 svm_queue_t *q = shmem_hdr->vl_input_queue;
641 vlib_combined_counter_main_t *cm;
642 u32 items_this_message = 0;
643 vlib_counter_t v, *vp = 0;
/* Hold the counter lock so interface registration can't move vectors. */
646 vnet_interface_counter_lock (im);
648 vec_foreach (cm, im->combined_sw_if_counters)
650 n_counts = vlib_combined_counter_n_counters (cm);
651 for (i = 0; i < n_counts; i++)
/* Start a new batch: cap it so the message stays <= ring size. */
655 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
658 mp = vl_msg_api_alloc_as_if_client
659 (sizeof (*mp) + items_this_message * sizeof (v));
660 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
/* Counter type is the index of cm within combined_sw_if_counters. */
661 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
662 mp->first_sw_if_index = htonl (i);
664 vp = (vlib_counter_t *) mp->data;
/* Store packets/bytes unaligned, in network byte order. */
666 vlib_get_combined_counter (cm, i, &v);
667 clib_mem_unaligned (&vp->packets, u64)
668 = clib_host_to_net_u64 (v.packets);
669 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
672 if (mp->count == items_this_message)
674 mp->count = htonl (items_this_message);
675 /* Send to the main thread... */
676 vl_msg_api_send_shmem (q, (u8 *) & mp);
682 vnet_interface_counter_unlock (im);
685 /**********************************
686 * Per Interface Combined stats
687 **********************************/
689 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_COMBINED_STATS: validate each requested
 * sw_if_index, then register (or deregister) the client for combined
 * counters on each one.  If the client's API registration has vanished,
 * its interest is cleared instead; finally a retval reply is sent.
 * NOTE(review): the ~0 wildcard skip and early-return branches are
 * elided in this copy. */
691 vl_api_want_per_interface_combined_stats_t_handler
692 (vl_api_want_per_interface_combined_stats_t * mp)
694 stats_main_t *sm = &stats_main;
695 vpe_client_registration_t rp;
696 vl_api_want_per_interface_combined_stats_reply_t *rmp;
697 vlib_combined_counter_main_t *cm;
700 vl_api_registration_t *reg;
701 u32 i, swif, num = 0;
703 num = ntohl (mp->num);
706 * Validate sw_if_indexes before registering
708 for (i = 0; i < num; i++)
710 swif = ntohl (mp->sw_ifs[i]);
713 * Check its a real sw_if_index that the client is allowed to see
717 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
719 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indices valid: apply the enable/disable for each interface. */
725 for (i = 0; i < num; i++)
727 swif = ntohl (mp->sw_ifs[i]);
729 rp.client_index = mp->client_index;
730 rp.client_pid = mp->pid;
731 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
732 swif, ntohl (mp->enable_disable));
736 reg = vl_api_client_index_to_registration (mp->client_index);
/* Client disappeared: undo every registration made above. */
739 for (i = 0; i < num; i++)
741 swif = ntohl (mp->sw_ifs[i]);
744 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
750 rmp = vl_msg_api_alloc (sizeof (*rmp));
751 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
752 rmp->context = mp->context;
753 rmp->retval = retval;
755 vl_api_send_msg (reg, (u8 *) rmp);
758 /* Per Interface Combined distribution to client */
/* Periodic distributor for per-interface combined counters: walk every
 * registration, snapshot rx/tx/unicast/multicast/broadcast counters for
 * the registered sw_if_index, and send one message per client.  The ~0
 * wildcard registration is delegated to do_combined_interface_counters.
 * Registrations and clients are first copied into regs_tmp/clients_tmp
 * so pools aren't iterated while being mutated. */
760 do_combined_per_interface_counters (stats_main_t * sm)
762 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
763 vnet_interface_main_t *im = sm->interface_main;
764 api_main_t *am = sm->api_main;
765 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
766 vl_api_registration_t *vl_reg;
767 vlib_combined_counter_main_t *cm;
768 vl_api_vnet_combined_counter_t *vp = 0;
771 vpe_client_stats_registration_t *reg;
772 vpe_client_registration_t *client;
773 u32 *sw_if_index = 0;
775 vnet_interface_counter_lock (im);
/* Snapshot registrations so the pool can be modified while we work. */
777 vec_reset_length (sm->regs_tmp);
781 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
782 ({ vec_add1 (sm->regs_tmp, reg); }));
785 for (i = 0; i < vec_len (sm->regs_tmp); i++)
787 reg = sm->regs_tmp[i];
/* Wildcard (~0) item: drop the lock and use the all-interface path. */
790 vnet_interface_counter_unlock (im);
791 do_combined_interface_counters (sm);
792 vnet_interface_counter_lock (im);
/* Snapshot this registration's clients for the same reason. */
795 vec_reset_length (sm->clients_tmp);
798 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
802 for (j = 0; j < vec_len (sm->clients_tmp); j++)
804 client = sm->clients_tmp[j];
806 vl_reg = vl_api_client_index_to_registration (client->client_index);
808 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
812 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
813 reg->item, client->client_index);
816 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
817 memset (mp, 0, sizeof (*mp));
820 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
823 * count will eventually be used to optimise the batching
824 * of per client messages for each stat. For now setting this to 1 then
825 * iterate. This will not affect API.
827 * FIXME instead of enqueueing here, this should be sent to a batch
828 * storer for per-client transmission. Each "mp" sent would be a single entry
829 * and if a client is listening to other sw_if_indexes for same, it would be
830 * appended to that *mp
834 * - capturing the timestamp of the counters "when VPP knew them" is important.
835 * Less so is that the timing of the delivery to the control plane be in the same
838 * i.e. As long as the control plane can delta messages from VPP and work out
839 * velocity etc based on the timestamp, it can do so in a more "batch mode".
841 * It would be beneficial to keep a "per-client" message queue, and then
842 * batch all the stat messages for a client into one message, with
843 * discrete timestamps.
845 * Given this particular API is for "per interface" one assumes that the scale
846 * is less than the ~0 case, which the prior API is suited for.
850 * 1 message per api call for now
852 mp->count = htonl (1);
853 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
855 vp = (vl_api_vnet_combined_counter_t *) mp->data;
856 vp->sw_if_index = htonl (reg->item);
858 im = &vnet_get_main ()->interface_main;
/* Macro: fetch combined counter X for this interface and store its
 * packets/bytes (unaligned, network byte order) into field pair x. */
861 cm = im->combined_sw_if_counters + X; \
862 vlib_get_combined_counter (cm, reg->item, &v); \
863 clib_mem_unaligned (&vp->x##_packets, u64) = \
864 clib_host_to_net_u64 (v.packets); \
865 clib_mem_unaligned (&vp->x##_bytes, u64) = \
866 clib_host_to_net_u64 (v.bytes);
869 _(VNET_INTERFACE_COUNTER_RX, rx);
870 _(VNET_INTERFACE_COUNTER_TX, tx);
871 _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
872 _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
873 _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
874 _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
875 _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
876 _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
880 vl_api_send_msg (vl_reg, (u8 *) mp);
884 vnet_interface_counter_unlock (im);
887 /**********************************
888 * Per Interface simple stats
889 **********************************/
891 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_SIMPLE_STATS: validate each requested
 * sw_if_index, then register (or deregister) the client for simple
 * counters on each one.  If the client's API registration has vanished,
 * its interest is cleared instead; finally a retval reply is sent.
 * NOTE(review): the ~0 wildcard skip and early-return branches are
 * elided in this copy. */
893 vl_api_want_per_interface_simple_stats_t_handler
894 (vl_api_want_per_interface_simple_stats_t * mp)
896 stats_main_t *sm = &stats_main;
897 vpe_client_registration_t rp;
898 vl_api_want_per_interface_simple_stats_reply_t *rmp;
899 vlib_simple_counter_main_t *cm;
902 vl_api_registration_t *reg;
903 u32 i, swif, num = 0;
905 num = ntohl (mp->num);
907 for (i = 0; i < num; i++)
909 swif = ntohl (mp->sw_ifs[i]);
911 /* Check its a real sw_if_index that the client is allowed to see */
914 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
916 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indices valid: apply the enable/disable for each interface. */
922 for (i = 0; i < num; i++)
924 swif = ntohl (mp->sw_ifs[i]);
926 rp.client_index = mp->client_index;
927 rp.client_pid = mp->pid;
928 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
929 swif, ntohl (mp->enable_disable));
933 reg = vl_api_client_index_to_registration (mp->client_index);
935 /* Client may have disconnected abruptly, clean up */
938 for (i = 0; i < num; i++)
940 swif = ntohl (mp->sw_ifs[i]);
942 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
950 rmp = vl_msg_api_alloc (sizeof (*rmp));
951 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
952 rmp->context = mp->context;
953 rmp->retval = retval;
955 vl_api_send_msg (reg, (u8 *) rmp);
958 /* Per Interface Simple distribution to client */
/* Periodic distributor for per-interface simple counters: walk every
 * registration, snapshot drop/punt/ip4/ip6/rx-no-buf/rx-miss/rx-error/
 * tx-error/mpls counters for the registered sw_if_index, and send one
 * message per client.  The ~0 wildcard registration is delegated to
 * do_simple_interface_counters.  Registrations and clients are copied
 * into regs_tmp/clients_tmp so pools aren't iterated while mutated. */
960 do_simple_per_interface_counters (stats_main_t * sm)
962 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
963 vnet_interface_main_t *im = sm->interface_main;
964 api_main_t *am = sm->api_main;
965 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
966 vl_api_registration_t *vl_reg;
967 vlib_simple_counter_main_t *cm;
969 vpe_client_stats_registration_t *reg;
970 vpe_client_registration_t *client;
971 u32 timestamp, count;
972 vl_api_vnet_simple_counter_t *vp = 0;
975 vnet_interface_counter_lock (im);
/* Snapshot registrations so the pool can be modified while we work. */
977 vec_reset_length (sm->regs_tmp);
981 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
982 ({ vec_add1 (sm->regs_tmp, reg); }));
985 for (i = 0; i < vec_len (sm->regs_tmp); i++)
987 reg = sm->regs_tmp[i];
/* Wildcard (~0) item: drop the lock and use the all-interface path. */
990 vnet_interface_counter_unlock (im);
991 do_simple_interface_counters (sm);
992 vnet_interface_counter_lock (im);
/* Snapshot this registration's clients for the same reason. */
995 vec_reset_length (sm->clients_tmp);
998 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
1002 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1004 client = sm->clients_tmp[j];
1005 vl_reg = vl_api_client_index_to_registration (client->client_index);
1007 /* Client may have disconnected abrubtly, clean up */
1011 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1012 reg->item, client->client_index);
1016 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1017 memset (mp, 0, sizeof (*mp));
1018 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1021 * count will eventually be used to optimise the batching
1022 * of per client messages for each stat. For now setting this to 1 then
1023 * iterate. This will not affect API.
1025 * FIXME instead of enqueueing here, this should be sent to a batch
1026 * storer for per-client transmission. Each "mp" sent would be a single entry
1027 * and if a client is listening to other sw_if_indexes for same, it would be
1028 * appended to that *mp
1032 * - capturing the timestamp of the counters "when VPP knew them" is important.
1033 * Less so is that the timing of the delivery to the control plane be in the same
1036 * i.e. As long as the control plane can delta messages from VPP and work out
1037 * velocity etc based on the timestamp, it can do so in a more "batch mode".
1039 * It would be beneficial to keep a "per-client" message queue, and then
1040 * batch all the stat messages for a client into one message, with
1041 * discrete timestamps.
1043 * Given this particular API is for "per interface" one assumes that the scale
1044 * is less than the ~0 case, which the prior API is suited for.
1048 * 1 message per api call for now
1050 mp->count = htonl (1);
1051 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1052 vp = (vl_api_vnet_simple_counter_t *) mp->data;
1054 vp->sw_if_index = htonl (reg->item);
/* One unaligned, network-byte-order store per simple counter type. */
1056 // VNET_INTERFACE_COUNTER_DROP
1057 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1058 v = vlib_get_simple_counter (cm, reg->item);
1059 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1061 // VNET_INTERFACE_COUNTER_PUNT
1062 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1063 v = vlib_get_simple_counter (cm, reg->item);
1064 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1066 // VNET_INTERFACE_COUNTER_IP4
1067 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1068 v = vlib_get_simple_counter (cm, reg->item);
1069 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1071 //VNET_INTERFACE_COUNTER_IP6
1072 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1073 v = vlib_get_simple_counter (cm, reg->item);
1074 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1076 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1077 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1078 v = vlib_get_simple_counter (cm, reg->item);
1079 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1080 clib_host_to_net_u64 (v);
1082 //VNET_INTERFACE_COUNTER_RX_MISS
1083 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1084 v = vlib_get_simple_counter (cm, reg->item);
1085 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1087 //VNET_INTERFACE_COUNTER_RX_ERROR
1088 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1089 v = vlib_get_simple_counter (cm, reg->item);
1090 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1092 //VNET_INTERFACE_COUNTER_TX_ERROR
1093 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1094 v = vlib_get_simple_counter (cm, reg->item);
1095 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1097 //VNET_INTERFACE_COUNTER_MPLS
1098 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1099 v = vlib_get_simple_counter (cm, reg->item);
1100 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1102 vl_api_send_msg (vl_reg, (u8 *) mp);
1106 vnet_interface_counter_unlock (im);
1109 /**********************************
1111 **********************************/
/* Sleep for sec seconds + nsec nanoseconds using nanosleep(2), warning
 * on failure.  Used to back off when the client API queue is full.
 * NOTE(review): the retry-on-interrupt loop structure (use of rem) is
 * elided in this copy. */
1114 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1116 struct timespec _req, *req = &_req;
1117 struct timespec _rem, *rem = &_rem;
1120 req->tv_nsec = nsec;
1123 if (nanosleep (req, rem) == 0)
/* nanosleep failed (e.g. EINTR/EINVAL): log via clib. */
1128 clib_unix_warning ("nanosleep");
1134 * @brief The context passed when collecting adjacency counters
/* Walk context for collecting ip4 neighbour (adjacency) counters on one
 * interface; the counters vector accumulates results across suspends. */
1136 typedef struct ip4_nbr_stats_ctx_t_
1139 * The SW IF index all these adjs belong to
1144 * A vector of ip4 nbr counters
1146 vl_api_ip4_nbr_counter_t *counters;
1147 } ip4_nbr_stats_ctx_t;
/* adj_nbr_walk callback: for each ip4 neighbour adjacency with non-zero
 * packet count, append a wire-format counter record (packets/bytes in
 * network byte order, next-hop address, link type) to the walk context.
 * Always continues the walk.  NOTE(review): the ctx/adj assignment lines
 * are elided in this copy. */
1149 static adj_walk_rc_t
1150 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1152 vl_api_ip4_nbr_counter_t *vl_counter;
1153 vlib_counter_t adj_counter;
1154 ip4_nbr_stats_ctx_t *ctx;
1155 ip_adjacency_t *adj;
1158 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies to keep messages small. */
1160 if (0 != adj_counter.packets)
1162 vec_add2 (ctx->counters, vl_counter, 1);
1165 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1166 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
/* ip4 address is already in network byte order as_u32. */
1167 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1168 vl_counter->link_type = adj->ia_link;
1170 return (ADJ_WALK_RC_CONTINUE);
1173 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain the accumulated ip4 neighbour counters to the main thread in
 * batches of at most IP4_FIB_COUNTER_BATCH_SIZE per message, copying
 * from the back of the vector and shrinking it.  If the API queue was
 * full when a message was enqueued, back off via ip46_fib_stats_delay
 * before the next batch.  NOTE(review): the dsunlock, queue-lock and
 * clib_memcpy destination lines are elided in this copy. */
1176 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1178 api_main_t *am = sm->api_main;
1179 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1180 svm_queue_t *q = shmem_hdr->vl_input_queue;
1181 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1185 * If the walk context has counters, which may be left over from the last
1186 * suspend, then we continue from there.
1188 while (0 != vec_len (ctx->counters))
1190 u32 n_items = MIN (vec_len (ctx->counters),
1191 IP4_FIB_COUNTER_BATCH_SIZE);
/* Hold the data-structure lock while building the message. */
1194 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1196 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1199 (vl_api_ip4_nbr_counter_t)));
1200 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1201 mp->count = ntohl (n_items);
1202 mp->sw_if_index = ntohl (ctx->sw_if_index);
1207 * copy the counters from the back of the context, then we can easily
1208 * 'erase' them by resetting the vector length.
1209 * The order we push the stats to the caller is not important.
1212 &ctx->counters[vec_len (ctx->counters) - n_items],
1213 n_items * sizeof (*ctx->counters));
/* 'Erase' the shipped tail by shrinking the vector in place. */
1215 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Remember queue fullness before enqueueing, to decide on back-off. */
1221 pause = svm_queue_is_full (q);
1223 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1224 svm_queue_unlock (q);
/* Queue was full: give the reader time to drain before continuing. */
1228 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1229 STATS_RELEASE_DELAY_NS);
/* Collect and ship ip4 neighbour counters for every software interface:
 * walk each interface's neighbour adjacencies under the data-structure
 * lock, then ship whatever the walk accumulated.
 * NOTE(review): the walk-callback argument line and the dsunlock after
 * the walk are elided in this copy. */
1234 do_ip4_nbr_counters (stats_main_t * sm)
1236 vnet_main_t *vnm = vnet_get_main ();
1237 vnet_interface_main_t *im = &vnm->interface_main;
1238 vnet_sw_interface_t *si;
1240 ip4_nbr_stats_ctx_t ctx = {
1246 pool_foreach (si, im->sw_interfaces,
1249 * update the interface we are now concerned with
1251 ctx.sw_if_index = si->sw_if_index;
1254 * we are about to walk another interface, so we shouldn't have any pending
1257 ASSERT(ctx.counters == NULL);
1260 * visit each neighbour adjacency on the interface and collect
1261 * its current stats.
1262 * Because we hold the lock the walk is synchronous, so safe to routing
1263 * updates. It's limited in work by the number of adjacenies on an
1264 * interface, which is typically not huge.
1266 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1267 adj_nbr_walk (si->sw_if_index,
1274 * if this interface has some adjacencies with counters then ship them,
1275 * else continue to the next interface.
1277 if (NULL != ctx.counters)
1279 ip4_nbr_ship(sm, &ctx);
1286 * @brief The context passed when collecting adjacency counters
/* Walk context for collecting ip6 neighbour (adjacency) counters on one
 * interface; the counters vector accumulates results across suspends. */
1288 typedef struct ip6_nbr_stats_ctx_t_
1291 * The SW IF index all these adjs belong to
1296 * A vector of ip6 nbr counters
1298 vl_api_ip6_nbr_counter_t *counters;
1299 } ip6_nbr_stats_ctx_t;
/* adj_nbr_walk callback: for each ip6 neighbour adjacency with non-zero
 * packet count, append a wire-format counter record (packets/bytes in
 * network byte order, 128-bit next-hop address, link type) to the walk
 * context.  Always continues the walk.  NOTE(review): the ctx/adj
 * assignment lines are elided in this copy. */
1301 static adj_walk_rc_t
1302 ip6_nbr_stats_cb (adj_index_t ai,
1305 vl_api_ip6_nbr_counter_t *vl_counter;
1306 vlib_counter_t adj_counter;
1307 ip6_nbr_stats_ctx_t *ctx;
1308 ip_adjacency_t *adj;
1311 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies to keep messages small. */
1313 if (0 != adj_counter.packets)
1315 vec_add2(ctx->counters, vl_counter, 1);
1318 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1319 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
/* ip6 address copied as two raw u64 halves. */
1320 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1321 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1322 vl_counter->link_type = adj->ia_link;
1324 return (ADJ_WALK_RC_CONTINUE);
1327 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * Drain ctx->counters to the main thread's input queue in batches of at
 * most IP6_FIB_COUNTER_BATCH_SIZE, as VNET_IP6_NBR_COUNTERS messages.
 * Counters are copied from the BACK of the vector so they can be erased
 * by truncating the vector length; ordering to the client is irrelevant.
 * If the queue is full after a send, pause so the main thread can drain.
 */
1330 ip6_nbr_ship (stats_main_t * sm,
1331 ip6_nbr_stats_ctx_t *ctx)
1333 api_main_t *am = sm->api_main;
1334 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1335 svm_queue_t *q = shmem_hdr->vl_input_queue;
1336 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1340 * If the walk context has counters, which may be left over from the last
1341 * suspend, then we continue from there.
1343 while (0 != vec_len(ctx->counters))
1345 u32 n_items = MIN (vec_len (ctx->counters),
1346 IP6_FIB_COUNTER_BATCH_SIZE);
1349 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1351 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1354 (vl_api_ip6_nbr_counter_t)));
1355 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1356 mp->count = ntohl (n_items);
1357 mp->sw_if_index = ntohl (ctx->sw_if_index);
1362 * copy the counters from the back of the context, then we can easily
1363 * 'erase' them by resetting the vector length.
1364 * The order we push the stats to the caller is not important.
1367 &ctx->counters[vec_len (ctx->counters) - n_items],
1368 n_items * sizeof (*ctx->counters));
1370 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* check queue occupancy BEFORE sending so we pause after a send into a
 * full queue rather than blocking inside the send */
1376 pause = svm_queue_is_full (q);
1378 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1379 svm_queue_unlock (q);
1383 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1384 STATS_RELEASE_DELAY_NS);
/*
 * IPv6 twin of do_ip4_nbr_counters(): walk every software interface,
 * collect neighbour adjacency counters under the data-structure lock,
 * and ship non-empty batches via ip6_nbr_ship().
 */
1389 do_ip6_nbr_counters (stats_main_t * sm)
1391 vnet_main_t *vnm = vnet_get_main ();
1392 vnet_interface_main_t *im = &vnm->interface_main;
1393 vnet_sw_interface_t *si;
1395 ip6_nbr_stats_ctx_t ctx = {
1401 pool_foreach (si, im->sw_interfaces,
1404 * update the interface we are now concerned with
1406 ctx.sw_if_index = si->sw_if_index;
1409 * we are about to walk another interface, so we shouldn't have any pending
1412 ASSERT(ctx.counters == NULL);
1415 * visit each neighbour adjacency on the interface and collect
1416 * its current stats.
1417 * Because we hold the lock the walk is synchronous, so safe to routing
1418 * updates. It's limited in work by the number of adjacenies on an
1419 * interface, which is typically not huge.
1421 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1422 adj_nbr_walk (si->sw_if_index,
1429 * if this interface has some adjacencies with counters then ship them,
1430 * else continue to the next interface.
1432 if (NULL != ctx.counters)
1434 ip6_nbr_ship(sm, &ctx);
/*
 * Scrape per-route IPv4 FIB counters and push them to registered clients
 * as VNET_IP4_FIB_COUNTERS messages in IP4_FIB_COUNTER_BATCH_SIZE chunks.
 *
 * Cooperates with the control plane: the mlx-style dslock is taken while
 * walking FIB data structures, and if the main thread sets release_hint
 * we remember where we were (start_at_fib_index), drop the lock, sleep
 * briefly, and restart the current FIB's batch from scratch.  Hence the
 * "We may have bailed out" skip loop at the top of the per-FIB loop.
 */
1441 do_ip4_fib_counters (stats_main_t * sm)
1443 ip4_main_t *im4 = &ip4_main;
1444 api_main_t *am = sm->api_main;
1445 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1446 svm_queue_t *q = shmem_hdr->vl_input_queue;
1450 do_ip46_fibs_t *do_fibs;
1451 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1452 u32 items_this_message;
1453 vl_api_ip4_fib_counter_t *ctrp = 0;
1454 u32 start_at_fib_index = 0;
1457 do_fibs = &sm->do_ip46_fibs;
/* snapshot the FIB pool into a plain vector so suspension is safe */
1460 vec_reset_length (do_fibs->fibs);
1462 pool_foreach (fib, im4->fibs,
1463 ({vec_add1(do_fibs->fibs,fib);}));
1467 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1469 fib = do_fibs->fibs[j];
1470 /* We may have bailed out due to control-plane activity */
1471 while ((fib - im4->fibs) < start_at_fib_index)
1474 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1478 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1479 mp = vl_msg_api_alloc_as_if_client
1481 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1482 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1484 mp->vrf_id = ntohl (fib->ft_table_id);
1485 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1489 /* happens if the last FIB was empty... */
1490 ASSERT (mp->count == 0);
1491 mp->vrf_id = ntohl (fib->ft_table_id);
1494 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1496 vec_reset_length (do_fibs->ip4routes);
1497 vec_reset_length (do_fibs->results);
/* harvest routes per prefix-length hash table into a flat vector */
1499 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1501 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1505 vec_reset_length (do_fibs->pvec);
1507 x.address_length = i;
1509 hash_foreach_pair (p, hash, (
1511 vec_add1 (do_fibs->pvec, p);}
1513 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1515 p = do_fibs->pvec[k];
1516 x.address.data_u32 = p->key;
1517 x.index = p->value[0];
1519 vec_add1 (do_fibs->ip4routes, x);
/* control plane wants the lock: restart this FIB after a pause */
1520 if (sm->data_structure_lock->release_hint)
1522 start_at_fib_index = fib - im4->fibs;
1524 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1525 STATS_RELEASE_DELAY_NS);
1527 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1533 vec_foreach (r, do_fibs->ip4routes)
1536 const dpo_id_t *dpo_id;
1539 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1540 index = (u32) dpo_id->dpoi_index;
1542 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1545 * If it has actually
1546 * seen at least one packet, send it.
1551 /* already in net byte order */
1552 ctrp->address = r->address.as_u32;
1553 ctrp->address_length = r->address_length;
1554 ctrp->packets = clib_host_to_net_u64 (c.packets);
1555 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1559 if (mp->count == items_this_message)
1561 mp->count = htonl (items_this_message);
1563 * If the main thread's input queue is stuffed,
1564 * drop the data structure lock (which the main thread
1565 * may want), and take a pause.
1568 if (svm_queue_is_full (q))
1571 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1572 svm_queue_unlock (q);
1574 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1575 STATS_RELEASE_DELAY_NS);
1578 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1579 svm_queue_unlock (q);
1581 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1582 mp = vl_msg_api_alloc_as_if_client
1584 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1585 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1587 mp->vrf_id = ntohl (fib->ft_table_id);
1588 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1590 } /* for each (mp or single) adj */
1591 if (sm->data_structure_lock->release_hint)
1593 start_at_fib_index = fib - im4->fibs;
1595 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1597 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1600 } /* vec_foreach (routes) */
1604 /* Flush any data from this fib */
1607 mp->count = htonl (mp->count);
1608 vl_msg_api_send_shmem (q, (u8 *) & mp);
1613 /* If e.g. the last FIB had no reportable routes, free the buffer */
1615 vl_msg_api_free (mp);
/*
 * mfib_table_walk() callback: record the prefix of each multicast FIB
 * entry into sm->do_ip46_fibs.mroutes.  Prefixes (not entry indices) are
 * stored because the collector suspends and must re-look-up each entry.
 */
1619 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1621 stats_main_t *sm = ctx;
1622 do_ip46_fibs_t *do_fibs;
1623 mfib_entry_t *entry;
1625 do_fibs = &sm->do_ip46_fibs;
1626 entry = mfib_entry_get (fei);
1628 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/*
 * Scrape IPv4 multicast-FIB (replicate DPO) counters and push them to
 * clients as VNET_IP4_MFIB_COUNTERS messages in batches.  Prefixes are
 * gathered under the dslock, then each is re-looked-up before reading
 * its counter since the thread may have suspended in between.
 */
1634 do_ip4_mfib_counters (stats_main_t * sm)
1636 ip4_main_t *im4 = &ip4_main;
1637 api_main_t *am = sm->api_main;
1638 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1639 svm_queue_t *q = shmem_hdr->vl_input_queue;
1642 do_ip46_fibs_t *do_fibs;
1643 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1644 u32 items_this_message;
1645 vl_api_ip4_mfib_counter_t *ctrp = 0;
1646 u32 start_at_mfib_index = 0;
1649 do_fibs = &sm->do_ip46_fibs;
1651 vec_reset_length (do_fibs->mfibs);
1653 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1656 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1658 mfib = do_fibs->mfibs[j];
1659 /* We may have bailed out due to control-plane activity */
1660 while ((mfib - im4->mfibs) < start_at_mfib_index)
1665 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1666 mp = vl_msg_api_alloc_as_if_client
1668 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1669 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1671 mp->vrf_id = ntohl (mfib->mft_table_id);
1672 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1676 /* happens if the last MFIB was empty... */
1677 ASSERT (mp->count == 0);
1678 mp->vrf_id = ntohl (mfib->mft_table_id);
1681 vec_reset_length (do_fibs->mroutes);
1684 * walk the table with table updates blocked
1686 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1688 mfib_table_walk (mfib->mft_index,
1689 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1692 vec_foreach (pfx, do_fibs->mroutes)
1694 const dpo_id_t *dpo_id;
1695 fib_node_index_t mfei;
1700 * re-lookup the entry, since we suspend during the collection
1702 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* route was deleted while we were suspended: skip it */
1704 if (FIB_NODE_INDEX_INVALID == mfei)
1707 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1708 index = (u32) dpo_id->dpoi_index;
1710 vlib_get_combined_counter (&replicate_main.repm_counters,
1711 dpo_id->dpoi_index, &c);
1713 * If it has seen at least one packet, send it.
1717 /* already in net byte order */
1718 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1719 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1720 ctrp->group_length = pfx->fp_len;
1721 ctrp->packets = clib_host_to_net_u64 (c.packets);
1722 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1726 if (mp->count == items_this_message)
1728 mp->count = htonl (items_this_message);
1730 * If the main thread's input queue is stuffed,
1731 * drop the data structure lock (which the main thread
1732 * may want), and take a pause.
1736 while (svm_queue_is_full (q))
1738 svm_queue_unlock (q);
1739 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1740 STATS_RELEASE_DELAY_NS);
1743 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1744 svm_queue_unlock (q);
1746 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1747 mp = vl_msg_api_alloc_as_if_client
1749 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1750 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1752 mp->vrf_id = ntohl (mfib->mft_table_id);
1753 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1758 /* Flush any data from this mfib */
1761 mp->count = htonl (mp->count);
1762 vl_msg_api_send_shmem (q, (u8 *) & mp);
1767 /* If e.g. the last FIB had no reportable routes, free the buffer */
1769 vl_msg_api_free (mp);
/*
 * IPv6 twin of do_ip4_mfib_counters(): same prefix-snapshot /
 * re-lookup / batch-and-ship structure, but 16-byte group/source
 * addresses and VNET_IP6_MFIB_COUNTERS messages.
 */
1773 do_ip6_mfib_counters (stats_main_t * sm)
1775 ip6_main_t *im6 = &ip6_main;
1776 api_main_t *am = sm->api_main;
1777 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1778 svm_queue_t *q = shmem_hdr->vl_input_queue;
1781 do_ip46_fibs_t *do_fibs;
1782 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1783 u32 items_this_message;
1784 vl_api_ip6_mfib_counter_t *ctrp = 0;
1785 u32 start_at_mfib_index = 0;
1788 do_fibs = &sm->do_ip46_fibs;
1790 vec_reset_length (do_fibs->mfibs);
1792 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1795 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1797 mfib = do_fibs->mfibs[j];
1798 /* We may have bailed out due to control-plane activity */
1799 while ((mfib - im6->mfibs) < start_at_mfib_index)
1804 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1805 mp = vl_msg_api_alloc_as_if_client
1807 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1808 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1810 mp->vrf_id = ntohl (mfib->mft_table_id);
1811 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1815 /* happens if the last MFIB was empty... */
1816 ASSERT (mp->count == 0);
1817 mp->vrf_id = ntohl (mfib->mft_table_id);
1820 vec_reset_length (do_fibs->mroutes);
1823 * walk the table with table updates blocked
1825 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1827 mfib_table_walk (mfib->mft_index,
1828 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1831 vec_foreach (pfx, do_fibs->mroutes)
1833 const dpo_id_t *dpo_id;
1834 fib_node_index_t mfei;
1839 * re-lookup the entry, since we suspend during the collection
1841 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* route was deleted while we were suspended: skip it */
1843 if (FIB_NODE_INDEX_INVALID == mfei)
1846 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1847 index = (u32) dpo_id->dpoi_index;
1849 vlib_get_combined_counter (&replicate_main.repm_counters,
1850 dpo_id->dpoi_index, &c);
1852 * If it has seen at least one packet, send it.
1856 /* already in net byte order */
1857 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1858 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1859 ctrp->group_length = pfx->fp_len;
1860 ctrp->packets = clib_host_to_net_u64 (c.packets);
1861 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1865 if (mp->count == items_this_message)
1867 mp->count = htonl (items_this_message);
1869 * If the main thread's input queue is stuffed,
1870 * drop the data structure lock (which the main thread
1871 * may want), and take a pause.
1875 while (svm_queue_is_full (q))
1877 svm_queue_unlock (q);
1878 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1879 STATS_RELEASE_DELAY_NS);
1882 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1883 svm_queue_unlock (q);
1885 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1886 mp = vl_msg_api_alloc_as_if_client
1888 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1889 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1891 mp->vrf_id = ntohl (mfib->mft_table_id);
1892 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1897 /* Flush any data from this mfib */
1900 mp->count = htonl (mp->count);
1901 vl_msg_api_send_shmem (q, (u8 *) & mp);
1906 /* If e.g. the last FIB had no reportable routes, free the buffer */
1908 vl_msg_api_free (mp);
/* Tail of add_routes_in_fib_arg_t: output route vector + owning sm. */
1914 ip6_route_t **routep;
1916 } add_routes_in_fib_arg_t;
/*
 * clib_bihash foreach callback: collect ip6 routes belonging to the
 * target FIB index.  If the control plane wants the data-structure
 * lock, bail out of the whole bihash walk via longjmp back to the
 * clib_setjmp() in do_ip6_fib_counters().
 */
1919 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1921 add_routes_in_fib_arg_t *ap = arg;
1922 stats_main_t *sm = ap->sm;
1924 if (sm->data_structure_lock->release_hint)
1925 clib_longjmp (&sm->jmp_buf, 1);
/* key[2] packs fib_index (high 32 bits) and prefix length (low byte) */
1927 if (kvp->key[2] >> 32 == ap->fib_index)
1929 ip6_address_t *addr;
1931 addr = (ip6_address_t *) kvp;
1932 vec_add2 (*ap->routep, r, 1);
1933 r->address = addr[0];
1934 r->address_length = kvp->key[2] & 0xFF;
1935 r->index = kvp->value;
/*
 * Scrape per-route IPv6 FIB counters and ship them as
 * VNET_IP6_FIB_COUNTERS batches.  Routes are harvested from the
 * forwarding bihash via add_routes_in_fib(); a setjmp/longjmp pair lets
 * that walk abort mid-table when the control plane wants the lock, in
 * which case we pause and restart the current FIB.
 */
1940 do_ip6_fib_counters (stats_main_t * sm)
1942 ip6_main_t *im6 = &ip6_main;
1943 api_main_t *am = sm->api_main;
1944 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1945 svm_queue_t *q = shmem_hdr->vl_input_queue;
1948 do_ip46_fibs_t *do_fibs;
1949 vl_api_vnet_ip6_fib_counters_t *mp = 0;
1950 u32 items_this_message;
1951 vl_api_ip6_fib_counter_t *ctrp = 0;
1952 u32 start_at_fib_index = 0;
1953 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1954 add_routes_in_fib_arg_t _a, *a = &_a;
1957 do_fibs = &sm->do_ip46_fibs;
1959 vec_reset_length (do_fibs->fibs);
1961 pool_foreach (fib, im6->fibs,
1962 ({vec_add1(do_fibs->fibs,fib);}));
1966 for (i = 0; i < vec_len (do_fibs->fibs); i++)
1968 fib = do_fibs->fibs[i];
1969 /* We may have bailed out due to control-plane activity */
1970 while ((fib - im6->fibs) < start_at_fib_index)
1975 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1976 mp = vl_msg_api_alloc_as_if_client
1978 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1979 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1981 mp->vrf_id = ntohl (fib->ft_table_id);
1982 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1985 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1987 vec_reset_length (do_fibs->ip6routes);
1988 vec_reset_length (do_fibs->results);
1990 a->fib_index = fib - im6->fibs;
1991 a->routep = &do_fibs->ip6routes;
/* setjmp returns non-zero when add_routes_in_fib() longjmps out */
1994 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1996 start_at_fib_index = fib - im6->fibs;
1997 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
2002 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2003 STATS_RELEASE_DELAY_NS);
2005 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2009 vec_foreach (r, do_fibs->ip6routes)
2013 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2016 * If it has actually
2017 * seen at least one packet, send it.
2021 /* already in net byte order */
2022 ctrp->address[0] = r->address.as_u64[0];
2023 ctrp->address[1] = r->address.as_u64[1];
2024 ctrp->address_length = (u8) r->address_length;
2025 ctrp->packets = clib_host_to_net_u64 (c.packets);
2026 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2030 if (mp->count == items_this_message)
2032 mp->count = htonl (items_this_message);
2034 * If the main thread's input queue is stuffed,
2035 * drop the data structure lock (which the main thread
2036 * may want), and take a pause.
2039 if (svm_queue_is_full (q))
2042 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2043 svm_queue_unlock (q);
2045 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2046 STATS_RELEASE_DELAY_NS);
2049 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2050 svm_queue_unlock (q);
2052 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2053 mp = vl_msg_api_alloc_as_if_client
2055 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2056 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2058 mp->vrf_id = ntohl (fib->ft_table_id);
2059 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2063 if (sm->data_structure_lock->release_hint)
2065 start_at_fib_index = fib - im6->fibs;
2067 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2069 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2072 } /* vec_foreach (routes) */
2076 /* Flush any data from this fib */
2079 mp->count = htonl (mp->count);
2080 vl_msg_api_send_shmem (q, (u8 *) & mp);
2085 /* If e.g. the last FIB had no reportable routes, free the buffer */
2087 vl_msg_api_free (mp);
/* Per-object UDP-encap counter sample and the walk context that
 * accumulates them. */
2090 typedef struct udp_encap_stat_t_
2096 typedef struct udp_encap_stats_walk_t_
2098 udp_encap_stat_t *stats;
2099 } udp_encap_stats_walk_t;
/*
 * udp_encap_walk() callback: append one stat record per UDP-encap
 * object to the walk context.
 */
2102 udp_encap_stats_walk_cb (index_t uei, void *arg)
2104 udp_encap_stats_walk_t *ctx = arg;
2105 udp_encap_stat_t *stat;
2108 ue = udp_encap_get (uei);
2109 vec_add2 (ctx->stats, stat, 1);
2112 udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
/*
 * Drain ctx->stats to clients as VNET_UDP_ENCAP_COUNTERS messages in
 * UDP_ENCAP_COUNTER_BATCH_SIZE chunks.  Same back-of-vector copy /
 * truncate pattern as ip6_nbr_ship(); pauses after sending into a full
 * queue so the main thread can drain it.
 */
2118 udp_encap_ship (udp_encap_stats_walk_t * ctx)
2120 vl_api_vnet_udp_encap_counters_t *mp;
2121 vl_shmem_hdr_t *shmem_hdr;
2129 shmem_hdr = am->shmem_hdr;
2130 q = shmem_hdr->vl_input_queue;
2133 * If the walk context has counters, which may be left over from the last
2134 * suspend, then we continue from there.
2136 while (0 != vec_len (ctx->stats))
2138 u32 n_items = MIN (vec_len (ctx->stats),
2139 UDP_ENCAP_COUNTER_BATCH_SIZE);
2142 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2144 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2147 (vl_api_udp_encap_counter_t)));
2148 mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2149 mp->count = ntohl (n_items);
2152 * copy the counters from the back of the context, then we can easily
2153 * 'erase' them by resetting the vector length.
2154 * The order we push the stats to the caller is not important.
2157 &ctx->stats[vec_len (ctx->stats) - n_items],
2158 n_items * sizeof (*ctx->stats));
2160 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
/* sample fullness before the send; pause afterwards if it was full */
2166 pause = svm_queue_is_full (q);
2168 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2169 svm_queue_unlock (q);
2173 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2174 STATS_RELEASE_DELAY_NS);
/*
 * Collect counters for every UDP-encap object under the dslock, then
 * ship them to registered clients via udp_encap_ship().
 */
2179 do_udp_encap_counters (stats_main_t * sm)
2181 udp_encap_stat_t *stat;
2183 udp_encap_stats_walk_t ctx = {
2187 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2188 udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
2191 udp_encap_ship (&ctx);
/*
 * Set the stats poller interval (seconds).  Zero is rejected with
 * VNET_API_ERROR_INVALID_ARGUMENT since it would spin the poller.
 */
2195 stats_set_poller_delay (u32 poller_delay_sec)
2197 stats_main_t *sm = &stats_main;
2198 if (!poller_delay_sec)
2200 return VNET_API_ERROR_INVALID_ARGUMENT;
2204 sm->stats_poll_interval_in_seconds = poller_delay_sec;
/*
 * Startup-config handler for the "stats { interval <seconds> }" stanza;
 * forwards the value to stats_set_poller_delay() and surfaces its error.
 */
2209 static clib_error_t *
2210 stats_config (vlib_main_t * vm, unformat_input_t * input)
2214 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2216 if (unformat (input, "interval %u", &sec))
2218 int rv = stats_set_poller_delay (sec);
2221 return clib_error_return (0,
2222 "`stats_set_poller_delay' API call failed, rv=%d:%U",
2223 (int) rv, format_vnet_api_errno, rv);
2229 return clib_error_return (0, "unknown input '%U'",
2230 format_unformat_error, input);
2236 /* stats { ... } configuration. */
2239 * @cfgcmd{interval, <seconds>}
2240 * Configure stats poller delay to be @c seconds.
/* Registers stats_config() for the "stats" startup-config section. */
2243 VLIB_CONFIG_FUNCTION (stats_config, "stats");
2246 vl_api_stats_get_poller_delay_t_handler
2247 (vl_api_stats_get_poller_delay_t * mp)
2249 stats_main_t *sm = &stats_main;
2250 vl_api_registration_t *reg;
2251 reg = vl_api_client_index_to_registration (mp->client_index);
2254 vl_api_stats_get_poller_delay_reply_t *rmp;
2256 rmp = vl_msg_api_alloc (sizeof (*rmp));
2257 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
2258 rmp->context = mp->context;
2260 rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2262 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Main loop of the dedicated stats pthread: block all signals, name the
 * thread "<prefix>_stats", then every poll interval run each collector
 * that has at least one registered client.  Collection is skipped
 * entirely while enable_poller is clear.
 */
2267 stats_thread_fn (void *arg)
2269 stats_main_t *sm = &stats_main;
2270 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2271 vlib_thread_main_t *tm = vlib_get_thread_main ();
2273 /* stats thread wants no signals. */
2277 pthread_sigmask (SIG_SETMASK, &s, 0);
2280 if (vec_len (tm->thread_prefix))
2281 vlib_set_thread_name ((char *)
2282 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2284 clib_mem_set_heap (w->thread_mheap);
2288 ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
2291 if (!(sm->enable_poller))
2296 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2297 do_combined_per_interface_counters (sm);
2300 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2301 do_simple_per_interface_counters (sm);
2303 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2304 do_ip4_fib_counters (sm);
2306 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2307 do_ip6_fib_counters (sm);
2309 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2310 do_ip4_mfib_counters (sm);
2312 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2313 do_ip6_mfib_counters (sm);
2315 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2316 do_ip4_nbr_counters (sm);
2318 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2319 do_ip6_nbr_counters (sm);
2321 if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
2322 do_udp_encap_counters (sm);
/*
 * Fan-out handler: forward a VNET_INTERFACE_SIMPLE_COUNTERS message to
 * every registered client.  The original 'mp' is sent to the last
 * reachable client and copies are made for the others (saves one
 * alloc/copy); dead clients are unregistered as they are discovered.
 */
2327 vl_api_vnet_interface_simple_counters_t_handler
2328 (vl_api_vnet_interface_simple_counters_t * mp)
2330 vpe_client_registration_t *clients, client;
2331 stats_main_t *sm = &stats_main;
2332 vl_api_registration_t *reg, *reg_prev = NULL;
2333 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2337 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2340 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2341 ~0 /*flag for all */ );
2343 for (i = 0; i < vec_len (clients); i++)
2345 client = clients[i];
2346 reg = vl_api_client_index_to_registration (client.client_index);
2349 if (reg_prev && vl_api_can_send_msg (reg_prev))
2351 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2352 clib_memcpy (mp_copy, mp, mp_size);
2353 vl_api_send_msg (reg_prev, (u8 *) mp);
/* client has gone away: drop its registration */
2361 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2362 client.client_index);
2369 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
/* ship the original message to the last live client, else free it */
2372 if (reg_prev && vl_api_can_send_msg (reg_prev))
2374 vl_api_send_msg (reg_prev, (u8 *) mp);
2378 vl_msg_api_free (mp);
/*
 * Fan-out handler for VNET_IP4_FIB_COUNTERS: copy-and-forward to every
 * registered client (original message goes to the last live client),
 * pruning registrations of clients that have disappeared.
 */
2383 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2385 stats_main_t *sm = &stats_main;
2386 vl_api_registration_t *reg, *reg_prev = NULL;
2387 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2389 vpe_client_registration_t *clients, client;
2392 mp_size = sizeof (*mp_copy) +
2393 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2396 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2398 for (i = 0; i < vec_len (clients); i++)
2400 client = clients[i];
2401 reg = vl_api_client_index_to_registration (client.client_index);
2404 if (reg_prev && vl_api_can_send_msg (reg_prev))
2406 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2407 clib_memcpy (mp_copy, mp, mp_size);
2408 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister and recompute whether the poller stays on */
2415 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2416 ~0, client.client_index);
2422 if (reg_prev && vl_api_can_send_msg (reg_prev))
2424 vl_api_send_msg (reg_prev, (u8 *) mp);
2428 vl_msg_api_free (mp);
/*
 * Fan-out handler for VNET_IP4_NBR_COUNTERS: same copy-and-forward
 * pattern as the ip4-fib handler above.
 */
2433 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2435 stats_main_t *sm = &stats_main;
2436 vl_api_registration_t *reg, *reg_prev = NULL;
2437 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2439 vpe_client_registration_t *clients, client;
2442 mp_size = sizeof (*mp_copy) +
2443 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2446 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2448 for (i = 0; i < vec_len (clients); i++)
2450 client = clients[i];
2451 reg = vl_api_client_index_to_registration (client.client_index);
2454 if (reg_prev && vl_api_can_send_msg (reg_prev))
2456 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2457 clib_memcpy (mp_copy, mp, mp_size);
2458 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister and recompute whether the poller stays on */
2465 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2466 ~0, client.client_index);
2473 if (reg_prev && vl_api_can_send_msg (reg_prev))
2475 vl_api_send_msg (reg_prev, (u8 *) mp);
2479 vl_msg_api_free (mp);
/*
 * Fan-out handler for VNET_IP6_FIB_COUNTERS: same copy-and-forward
 * pattern as the ip4-fib handler.
 */
2484 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2486 stats_main_t *sm = &stats_main;
2487 vl_api_registration_t *reg, *reg_prev = NULL;
2488 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2490 vpe_client_registration_t *clients, client;
2493 mp_size = sizeof (*mp_copy) +
2494 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2497 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2499 for (i = 0; i < vec_len (clients); i++)
2501 client = clients[i];
2502 reg = vl_api_client_index_to_registration (client.client_index);
2505 if (reg_prev && vl_api_can_send_msg (reg_prev))
2507 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2508 clib_memcpy (mp_copy, mp, mp_size);
2509 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister and recompute whether the poller stays on */
2516 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2517 ~0, client.client_index);
2524 if (reg_prev && vl_api_can_send_msg (reg_prev))
2526 vl_api_send_msg (reg_prev, (u8 *) mp);
2530 vl_msg_api_free (mp);
/*
 * Fan-out handler for VNET_IP6_NBR_COUNTERS: same copy-and-forward
 * pattern as the other counter fan-out handlers.
 */
2535 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2537 stats_main_t *sm = &stats_main;
2538 vl_api_registration_t *reg, *reg_prev = NULL;
2539 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2541 vpe_client_registration_t *clients, client;
2544 mp_size = sizeof (*mp_copy) +
2545 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2548 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2550 for (i = 0; i < vec_len (clients); i++)
2552 client = clients[i];
2553 reg = vl_api_client_index_to_registration (client.client_index);
2556 if (reg_prev && vl_api_can_send_msg (reg_prev))
2558 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2559 clib_memcpy (mp_copy, mp, mp_size);
2560 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister and recompute whether the poller stays on */
2567 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2568 ~0, client.client_index);
2575 if (reg_prev && vl_api_can_send_msg (reg_prev))
2577 vl_api_send_msg (reg_prev, (u8 *) mp);
2581 vl_msg_api_free (mp);
/*
 * WANT_UDP_ENCAP_STATS: (de)register the client for UDP-encap counters
 * and acknowledge.  If the client is already gone its registration is
 * cleared and no reply is sent.
 */
2586 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2588 stats_main_t *sm = &stats_main;
2589 vpe_client_registration_t rp;
2590 vl_api_want_udp_encap_stats_reply_t *rmp;
2593 vl_api_registration_t *reg;
2596 fib = ~0; //Using same mechanism as _per_interface_
2597 rp.client_index = mp->client_index;
2598 rp.client_pid = mp->pid;
2600 handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2603 reg = vl_api_client_index_to_registration (mp->client_index);
2607 sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2608 fib, mp->client_index);
2612 rmp = vl_msg_api_alloc (sizeof (*rmp));
2613 rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2614 rmp->context = mp->context;
2615 rmp->retval = retval;
2617 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Legacy WANT_STATS: (de)registers the client for ALL stat categories
 * (simple + combined interface, ip4/ip6 fib and nbr) in one shot, then
 * acknowledges.
 */
2621 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2623 stats_main_t *sm = &stats_main;
2624 vpe_client_registration_t rp;
2625 vl_api_want_stats_reply_t *rmp;
2629 vl_api_registration_t *reg;
2631 item = ~0; //"ALL THE THINGS IN THE THINGS
2632 rp.client_index = mp->client_index;
2633 rp.client_pid = mp->pid;
2635 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2636 item, mp->enable_disable);
2638 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2639 item, mp->enable_disable);
2641 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2642 item, mp->enable_disable);
2644 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2645 item, mp->enable_disable);
2647 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2648 item, mp->enable_disable);
2650 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2651 item, mp->enable_disable);
2654 reg = vl_api_client_index_to_registration (mp->client_index);
2658 rmp = vl_msg_api_alloc (sizeof (*rmp));
2659 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2660 rmp->context = mp->context;
2661 rmp->retval = retval;
2663 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * WANT_INTERFACE_SIMPLE_STATS: (de)register the client for simple
 * per-interface counters (all interfaces, swif == ~0) and acknowledge.
 */
2667 vl_api_want_interface_simple_stats_t_handler
2668 (vl_api_want_interface_simple_stats_t * mp)
2670 stats_main_t *sm = &stats_main;
2671 vpe_client_registration_t rp;
2672 vl_api_want_interface_simple_stats_reply_t *rmp;
2676 vl_api_registration_t *reg;
2678 swif = ~0; //Using same mechanism as _per_interface_
2679 rp.client_index = mp->client_index;
2680 rp.client_pid = mp->pid;
2682 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2683 mp->enable_disable);
2686 reg = vl_api_client_index_to_registration (mp->client_index);
2691 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2696 rmp = vl_msg_api_alloc (sizeof (*rmp));
2697 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2698 rmp->context = mp->context;
2699 rmp->retval = retval;
2701 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * WANT_IP4_FIB_STATS: (de)register the client for ip4 FIB counters
 * (all FIBs, fib == ~0) and acknowledge.
 */
2706 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2708 stats_main_t *sm = &stats_main;
2709 vpe_client_registration_t rp;
2710 vl_api_want_ip4_fib_stats_reply_t *rmp;
2713 vl_api_registration_t *reg;
2716 fib = ~0; //Using same mechanism as _per_interface_
2717 rp.client_index = mp->client_index;
2718 rp.client_pid = mp->pid;
2720 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2721 mp->enable_disable);
2724 reg = vl_api_client_index_to_registration (mp->client_index);
2728 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2729 fib, mp->client_index);
2733 rmp = vl_msg_api_alloc (sizeof (*rmp));
2734 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2735 rmp->context = mp->context;
2736 rmp->retval = retval;
2738 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * WANT_IP4_MFIB_STATS: (de)register the client for ip4 multicast-FIB
 * counters (all MFIBs, mfib == ~0) and acknowledge.
 */
2742 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2744 stats_main_t *sm = &stats_main;
2745 vpe_client_registration_t rp;
2746 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2749 vl_api_registration_t *reg;
2752 mfib = ~0; //Using same mechanism as _per_interface_
2753 rp.client_index = mp->client_index;
2754 rp.client_pid = mp->pid;
2756 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2757 mp->enable_disable);
2760 reg = vl_api_client_index_to_registration (mp->client_index);
2763 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2764 mfib, mp->client_index);
2768 rmp = vl_msg_api_alloc (sizeof (*rmp));
2769 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2770 rmp->context = mp->context;
2771 rmp->retval = retval;
2773 vl_api_send_msg (reg, (u8 *) rmp);
2777 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2779 stats_main_t *sm = &stats_main;
2780 vpe_client_registration_t rp;
2781 vl_api_want_ip4_fib_stats_reply_t *rmp;
2784 vl_api_registration_t *reg;
2787 fib = ~0; //Using same mechanism as _per_interface_
2788 rp.client_index = mp->client_index;
2789 rp.client_pid = mp->pid;
2791 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2792 mp->enable_disable);
2795 reg = vl_api_client_index_to_registration (mp->client_index);
2798 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2799 fib, mp->client_index);
2803 rmp = vl_msg_api_alloc (sizeof (*rmp));
2804 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2805 rmp->context = mp->context;
2806 rmp->retval = retval;
2808 vl_api_send_msg (reg, (u8 *) rmp);
/* Handler for WANT_IP6_MFIB_STATS: (un)register the calling client for
 * periodic IP6 multicast-FIB counter updates, then send a reply.
 * NOTE(review): this listing keeps original line numbers and omits some
 * intervening source lines (braces, retval/mfib declarations, the !reg
 * guard) — consult the full source. */
2812 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2814 stats_main_t *sm = &stats_main;
2815 vpe_client_registration_t rp;
/* FIX: reply pointer was declared with the IP4 mfib reply type
 * (vl_api_want_ip4_mfib_stats_reply_t) — a copy-paste slip. The reply
 * structs share the same wire layout so behavior is unchanged, but the
 * type now matches VL_API_WANT_IP6_MFIB_STATS_REPLY sent below. */
2816 vl_api_want_ip6_mfib_stats_reply_t *rmp;
2819 vl_api_registration_t *reg;
/* ~0 means "all mfibs" — same wildcard convention as the per-interface
 * stats handlers. */
2822 mfib = ~0; //Using same mechanism as _per_interface_
2823 rp.client_index = mp->client_index;
2824 rp.client_pid = mp->pid;
/* Add or remove this client from the IP6 mfib counter registration set
 * according to mp->enable_disable. */
2826 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2827 mp->enable_disable);
2830 reg = vl_api_client_index_to_registration (mp->client_index);
/* Presumably reached when the registration lookup fails (conditional is
 * among the omitted lines — TODO confirm): drop the stale client and
 * recompute whether the poller must keep running. */
2833 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2834 mfib, mp->client_index);
/* Build and send the reply; msg id goes out in network byte order. */
2838 rmp = vl_msg_api_alloc (sizeof (*rmp));
2839 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2840 rmp->context = mp->context;
2841 rmp->retval = retval;
2843 vl_api_send_msg (reg, (u8 *) rmp);
2846 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/* Stub handler for WANT_IP4_NBR_STATS — body omitted from this listing;
 * per the FIXME above, neighbor stats are currently non-functional. */
2848 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
/* Stub handler for WANT_IP6_NBR_STATS — same situation as the IP4 stub. */
2853 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/* Handler for VNET_GET_SUMMARY_STATS: sum all combined (pkts+bytes)
 * interface counters across RX and TX and reply with the totals.
 * NOTE(review): this listing keeps original line numbers; the !reg guard,
 * loop braces and some declarations (i, v, which, retval) are among the
 * omitted lines — consult the full source. */
2858 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2860 stats_main_t *sm = &stats_main;
2861 vnet_interface_main_t *im = sm->interface_main;
2862 vl_api_vnet_get_summary_stats_reply_t *rmp;
2863 vlib_combined_counter_main_t *cm;
/* One accumulator slot per direction (VLIB_RX / VLIB_TX). */
2866 u64 total_pkts[VLIB_N_RX_TX];
2867 u64 total_bytes[VLIB_N_RX_TX];
2868 vl_api_registration_t *reg;
2870 reg = vl_api_client_index_to_registration (mp->client_index);
2874 rmp = vl_msg_api_alloc (sizeof (*rmp));
2875 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2876 rmp->context = mp->context;
2879 memset (total_pkts, 0, sizeof (total_pkts));
2880 memset (total_bytes, 0, sizeof (total_bytes));
/* Hold the interface-counter lock while reading so totals are coherent. */
2882 vnet_interface_counter_lock (im);
2884 vec_foreach (cm, im->combined_sw_if_counters)
/* The counter-main's position in the vector encodes the direction. */
2886 which = cm - im->combined_sw_if_counters;
2888 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2890 vlib_get_combined_counter (cm, i, &v);
2891 total_pkts[which] += v.packets;
2892 total_bytes[which] += v.bytes;
2895 vnet_interface_counter_unlock (im);
/* Totals cross the API in network byte order. */
2897 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2898 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2899 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2900 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
/* Assignment target (presumably rmp->vector_rate) is on an omitted line. */
2902 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2904 vl_api_send_msg (reg, (u8 *) rmp);
/* Callback invoked when an API (memory) client disconnects. The cleanup
 * logic below is deliberately commented out in the original source —
 * registrations are presumably torn down elsewhere (TODO confirm). */
2908 stats_memclnt_delete_callback (u32 client_index)
2910 vpe_client_stats_registration_t *rp;
2911 stats_main_t *sm = &stats_main;
2915 /* p = hash_get (sm->stats_registration_hash, client_index); */
2918 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2919 /* pool_put (sm->stats_registrations, rp); */
2920 /* hash_unset (sm->stats_registration_hash, client_index); */
/* The bulk counter messages below are produced by the stats process and
 * consumed verbatim by clients; suppress the generated endian-swap and
 * print handlers for them by mapping each to vl_noop_handler. */
2926 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
2927 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
2928 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
2929 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
2930 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
2931 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
2932 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
2933 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
2934 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
2935 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
2936 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
2937 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
2938 #define vl_api_map_stats_segment_t_print vl_noop_handler
/* Handler for MAP_STATS_SEGMENT: validate that the caller is a socket
 * client, send a reply, then pass the stats shared-memory segment's file
 * descriptor over the socket so the client can mmap it.
 * NOTE(review): this listing keeps original line numbers; the !regp
 * guard, rv declaration, error-exit path and braces are among the
 * omitted lines — consult the full source. */
2941 vl_api_map_stats_segment_t_handler (vl_api_map_stats_segment_t * mp)
2943 vl_api_map_stats_segment_reply_t *rmp;
2944 stats_main_t *sm = &stats_main;
2945 ssvm_private_t *ssvmp = &sm->stat_segment;
2946 vl_api_registration_t *regp;
2947 api_main_t *am = &api_main;
2949 vl_api_shm_elem_config_t *config = 0;
2950 vl_shmem_hdr_t *shmem_hdr;
2953 regp = vl_api_client_index_to_registration (mp->client_index);
/* Presumably inside an if (!regp) branch on an omitted line. */
2956 clib_warning ("API client disconnected");
/* Only socket-transport clients can receive an fd; shared-memory
 * clients get an error in the reply instead. */
2959 if (regp->registration_type != REGISTRATION_TYPE_SOCKET_SERVER)
2960 rv = VNET_API_ERROR_INVALID_REGISTRATION;
2962 rmp = vl_msg_api_alloc (sizeof (*rmp));
2963 rmp->_vl_msg_id = htons (VL_API_MAP_STATS_SEGMENT_REPLY);
2964 rmp->context = mp->context;
2965 rmp->retval = htonl (rv);
2967 vl_api_send_msg (regp, (u8 *) rmp);
2973 * We need the reply message to make it out the back door
2974 * before we send the magic fd message so force a flush
2976 cf = vl_api_registration_file (regp);
2977 cf->write_function (cf);
2979 /* Send the magic "here's your sign (aka fd)" socket message */
2980 vl_sock_api_send_fd_msg (cf->file_descriptor, ssvmp->fd);
/* One-time initialization of the stats subsystem: cache vnet/interface
 * mains, allocate the data-structure lock, register message handlers,
 * mark bulk counter messages as non-freeable, and size the per-stat
 * registration pools/hashes.
 * NOTE(review): this listing keeps original line numbers; the handler
 * registration macro header, foreach_stats_msg invocation, #undef lines,
 * the return statement and braces are among the omitted lines. */
2983 static clib_error_t *
2984 stats_init (vlib_main_t * vm)
2986 stats_main_t *sm = &stats_main;
2987 api_main_t *am = &api_main;
/* Forward declaration of the worker bootstrap used by the stats thread. */
2988 void *vlib_worker_thread_bootstrap_fn (void *arg);
2991 sm->vnet_main = vnet_get_main ();
2992 sm->interface_main = &vnet_get_main ()->interface_main;
2994 sm->stats_poll_interval_in_seconds = 10;
/* Cache-line aligned lock guarding the stats data structures. */
2995 sm->data_structure_lock =
2996 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
2997 CLIB_CACHE_LINE_BYTES);
2998 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Body of the _(N,n) registration macro expanded over foreach_stats_msg
 * (macro header and invocation are on omitted lines). */
3001 vl_msg_api_set_handlers(VL_API_##N, #n, \
3002 vl_api_##n##_t_handler, \
3004 vl_api_##n##_t_endian, \
3005 vl_api_##n##_t_print, \
3006 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
3010 /* tell the msg infra not to free these messages... */
3011 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
3012 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
3013 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
3014 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
3015 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
3016 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
3019 * Set up the (msg_name, crc, message-id) table
3021 setup_message_id_table (am);
/* One registration pool + hash per stat index declared in stats.reg. */
3023 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
3024 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
3025 #define stats_reg(n) \
3026 sm->stats_registrations[IDX_##n] = 0; \
3027 sm->stats_registration_hash[IDX_##n] = 0;
3028 #include <vpp/stats/stats.reg>
3034 VLIB_INIT_FUNCTION (stats_init);
/* Register the dedicated stats-collection thread; it runs stats_thread_fn
 * and does not clone the main thread's data structures. (Closing brace
 * and remaining initializers are on omitted lines.) */
3037 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
3039 .function = stats_thread_fn,
3042 .no_data_structure_clone = 1,
3048 * fd.io coding-style-patch-verification: ON
3051 * eval: (c-set-style "gnu")