 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
/* Global singleton holding all stats-scraper state (registrations,
 * locks, temp vectors). Referenced throughout via &stats_main. */
25 stats_main_t stats_main;
27 #include <vnet/ip/ip.h>
29 #include <vpp/api/vpe_msg_enum.h>
32 #define f64_print(a,b)
/* Standard VPP binary-API boilerplate: re-include the all-API header
 * under different vl_* macros to instantiate message typedefs, endian
 * converters and print functions.
 * NOTE(review): the matching #undef lines appear to be missing from
 * this excerpt (source numbering gaps) -- confirm against upstream. */
34 #define vl_typedefs /* define message structures */
35 #include <vpp/api/vpe_all_api_h.h>
38 #define vl_endianfun /* define message structures */
39 #include <vpp/api/vpe_all_api_h.h>
42 /* instantiate all the print functions we know about */
43 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
45 #include <vpp/api/vpe_all_api_h.h>
/* X-macro listing every (MESSAGE_ID, handler_suffix) pair this module
 * handles; expanded elsewhere to register the API message handlers. */
48 #define foreach_stats_msg \
49 _(WANT_STATS, want_stats) \
50 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
51 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
52 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
53 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
54 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
55 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
56 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
57 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
58 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
59 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
60 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
61 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
62 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
63 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
64 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
65 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
66 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
67 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
68 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)
70 #define vl_msg_name_crc_list
71 #include <vpp/stats/stats.api.h>
72 #undef vl_msg_name_crc_list
/* Register "<name>_<crc>" -> message-id mappings with the API message
 * table so clients can resolve stats message ids at run time.
 * NOTE(review): the #define _(id,n,crc) helper and surrounding braces
 * are missing from this excerpt -- confirm against upstream. */
75 setup_message_id_table (api_main_t * am)
78 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
79 foreach_vl_msg_name_crc_stats;
/* Batch sizes are chosen so that message-header + N counter records
 * fits in a single 1024-byte shared-memory API ring slot. */
83 /* These constants ensure msg sizes <= 1024, aka ring allocation */
84 #define SIMPLE_COUNTER_BATCH_SIZE 126
85 #define COMBINED_COUNTER_BATCH_SIZE 63
86 #define IP4_FIB_COUNTER_BATCH_SIZE 48
87 #define IP6_FIB_COUNTER_BATCH_SIZE 30
88 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
89 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
90 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
/* 5 ms back-off used (via ip46_fib_stats_delay) when the client's
 * shared-memory queue is full. */
93 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* Format a VNET_INTERFACE_COMBINED_COUNTERS message as
 * "<if-name>.<counter>.packets N" / "<if-name>.<counter>.bytes N"
 * lines, one pair per interface starting at first_sw_if_index.
 * NOTE(review): several original lines (local declarations, switch
 * braces/breaks, the vp/sw_if_index increments) are missing from this
 * excerpt -- confirm against upstream before editing. */
97 format_vnet_interface_combined_counters (u8 * s, va_list * args)
99 stats_main_t *sm = &stats_main;
100 vl_api_vnet_interface_combined_counters_t *mp =
101 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
104 u32 count, sw_if_index;
/* message fields arrive in network byte order */
106 count = ntohl (mp->count);
107 sw_if_index = ntohl (mp->first_sw_if_index);
111 vp = (vlib_counter_t *) mp->data;
/* map the counter type to a printable name */
113 switch (mp->vnet_counter_type)
115 case VNET_INTERFACE_COUNTER_RX:
118 case VNET_INTERFACE_COUNTER_TX:
122 counter_name = "bogus";
125 for (i = 0; i < count; i++)
/* counter records in mp->data are unaligned and network byte order */
127 packets = clib_mem_unaligned (&vp->packets, u64);
128 packets = clib_net_to_host_u64 (packets);
129 bytes = clib_mem_unaligned (&vp->bytes, u64);
130 bytes = clib_net_to_host_u64 (bytes);
132 s = format (s, "%U.%s.packets %lld\n",
133 format_vnet_sw_if_index_name,
134 sm->vnet_main, sw_if_index, counter_name, packets);
135 s = format (s, "%U.%s.bytes %lld\n",
136 format_vnet_sw_if_index_name,
137 sm->vnet_main, sw_if_index, counter_name, bytes);
/* Format a VNET_INTERFACE_SIMPLE_COUNTERS message as
 * "<if-name>.<counter> N" lines, one per interface starting at
 * first_sw_if_index.
 * NOTE(review): local declarations, switch braces/breaks and loop
 * increments are missing from this excerpt -- confirm upstream. */
144 format_vnet_interface_simple_counters (u8 * s, va_list * args)
146 stats_main_t *sm = &stats_main;
147 vl_api_vnet_interface_simple_counters_t *mp =
148 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
150 u32 count, sw_if_index;
151 count = ntohl (mp->count);
152 sw_if_index = ntohl (mp->first_sw_if_index);
154 vp = (u64 *) mp->data;
/* map the simple-counter type to its printable name */
157 switch (mp->vnet_counter_type)
159 case VNET_INTERFACE_COUNTER_DROP:
160 counter_name = "drop";
162 case VNET_INTERFACE_COUNTER_PUNT:
163 counter_name = "punt";
165 case VNET_INTERFACE_COUNTER_IP4:
166 counter_name = "ip4";
168 case VNET_INTERFACE_COUNTER_IP6:
169 counter_name = "ip6";
171 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
172 counter_name = "rx-no-buff";
174 case VNET_INTERFACE_COUNTER_RX_MISS:
175 counter_name = "rx-miss";
177 case VNET_INTERFACE_COUNTER_RX_ERROR:
178 counter_name = "rx-error (fifo-full)";
180 case VNET_INTERFACE_COUNTER_TX_ERROR:
181 counter_name = "tx-error (fifo-full)";
184 counter_name = "bogus";
187 for (i = 0; i < count; i++)
/* values are unaligned, network byte order */
189 v = clib_mem_unaligned (vp, u64);
190 v = clib_net_to_host_u64 (v);
192 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
193 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the stats data-structure spinlock. Re-acquisition by the
 * thread that already holds it returns immediately (recursive-safe).
 * NOTE(review): the early-return bodies and brace lines are missing
 * from this excerpt -- confirm against upstream. */
201 dslock (stats_main_t * sm, int release_hint, int tag)
204 data_structure_lock_t *l = sm->data_structure_lock;
206 if (PREDICT_FALSE (l == 0))
209 thread_index = vlib_get_thread_index ();
210 if (l->lock && l->thread_index == thread_index)
/* spin until we own the lock */
219 while (__sync_lock_test_and_set (&l->lock, 1))
222 l->thread_index = thread_index;
/* Public wrapper: take the stats lock on the global stats_main. */
227 stats_dslock_with_hint (int hint, int tag)
229 stats_main_t *sm = &stats_main;
230 dslock (sm, hint, tag);
/* Release the stats data-structure spinlock; asserts the caller is
 * the current owner. The barrier orders stores before the release. */
234 dsunlock (stats_main_t * sm)
237 data_structure_lock_t *l = sm->data_structure_lock;
239 if (PREDICT_FALSE (l == 0))
242 thread_index = vlib_get_thread_index ();
243 ASSERT (l->lock && l->thread_index == thread_index);
249 CLIB_MEMORY_BARRIER ();
/* Public wrapper: release the stats lock on the global stats_main.
 * NOTE(review): the dsunlock(sm) call line is missing here. */
255 stats_dsunlock (int hint, int tag)
257 stats_main_t *sm = &stats_main;
/* Look up a specific client's registration for (stat-type reg, item).
 * Returns the client registration, or (per the visible early-exit
 * checks) presumably NULL when either hash lookup misses --
 * TODO confirm the missing return paths against upstream. */
261 static vpe_client_registration_t *
262 get_client_for_stat (u32 reg, u32 item, u32 client_index)
264 stats_main_t *sm = &stats_main;
265 vpe_client_stats_registration_t *registration;
268 /* Is there anything listening for item in that reg */
269 p = hash_get (sm->stats_registration_hash[reg], item);
274 /* If there is, is our client_index one of them */
275 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
276 p = hash_get (registration->client_hash, client_index);
281 return pool_elt_at_index (registration->clients, p[0]);
/* Register a client as a listener for (stat-type reg, item). Creates
 * the per-item registration on first use, then adds the client to its
 * pool + hash (idempotent via the client_hash lookup). Returns 1 so
 * the caller enables the poller. */
286 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
288 stats_main_t *sm = &stats_main;
289 vpe_client_stats_registration_t *registration;
290 vpe_client_registration_t *cr;
293 /* Is there anything listening for item in that reg */
294 p = hash_get (sm->stats_registration_hash[reg], item);
/* first listener for this item: create the registration record */
298 pool_get (sm->stats_registrations[reg], registration);
299 registration->item = item;
300 hash_set (sm->stats_registration_hash[reg], item,
301 registration - sm->stats_registrations[reg]);
305 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
308 p = hash_get (registration->client_hash, client->client_index);
/* client not yet on this item's list: add it */
312 pool_get (registration->clients, cr);
313 cr->client_index = client->client_index;
314 cr->client_pid = client->client_pid;
315 hash_set (registration->client_hash, cr->client_index,
316 cr - registration->clients);
319 return 1; //At least one client is doing something ... poll
/* Unregister a client from (stat-type reg, item), freeing the per-item
 * registration when its last client leaves, then recount remaining
 * registrations across all stat types (presumably the return value
 * drives sm->enable_poller -- TODO confirm missing tail upstream). */
323 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
325 stats_main_t *sm = &stats_main;
326 vpe_client_stats_registration_t *registration;
327 vpe_client_registration_t *client;
331 /* Clear the client first */
332 /* Is there anything listening for item in that reg */
333 p = hash_get (sm->stats_registration_hash[reg], item);
338 /* If there is, is our client_index one of them */
339 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
340 p = hash_get (registration->client_hash, client_index);
/* remove the client from the item's hash and pool */
345 client = pool_elt_at_index (registration->clients, p[0]);
346 hash_unset (registration->client_hash, client->client_index);
347 pool_put (registration->clients, client);
349 /* Now check if that was the last client for that item */
350 if (0 == pool_elts (registration->clients))
352 hash_unset (sm->stats_registration_hash[reg], item);
353 pool_put (sm->stats_registrations[reg], registration);
358 /* Now check if that was the last item in any of the listened to stats */
359 for (i = 0; i < STATS_REG_N_IDX; i++)
361 elts += pool_elts (sm->stats_registrations[i]);
/* Return a freshly-built vector containing a copy of every client
 * registered for (stat-type reg, item); caller owns/frees the vector.
 * NOTE(review): pool_foreach body braces are truncated here. */
366 vpe_client_registration_t *
367 get_clients_for_stat (u32 reg, u32 item)
369 stats_main_t *sm = &stats_main;
370 vpe_client_registration_t *client, *clients = 0;
371 vpe_client_stats_registration_t *registration;
374 /* Is there anything listening for item in that reg */
375 p = hash_get (sm->stats_registration_hash[reg], item);
380 /* If there is, is our client_index one of them */
381 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* copy each registered client into the result vector */
383 vec_reset_length (clients);
384 pool_foreach (client, registration->clients, (
386 vec_add1 (clients, *client);}
/* Free each per-stat-type interest vector (one per stats.reg entry),
 * then the containing vector itself. */
393 clear_client_reg (u32 ** registrations)
395 /* When registrations[x] is a vector of pool indices
396 here is a good place to clean up the pools
398 #define stats_reg(n) vec_free(registrations[IDX_##n]);
399 #include <vpp/stats/stats.reg>
402 vec_free (registrations);
/* Allocate and reset one interest vector per stat type listed in
 * stats.reg; returns the (possibly reallocated) registrations vector.
 * NOTE(review): the #undef stats_reg lines appear truncated here. */
406 init_client_reg (u32 ** registrations)
410 Initialise the stats registrations for each
411 type of stat a client can register for as well as
412 a vector of "interested" indexes.
413 Initially this is a u32 of either sw_if_index or fib_index
414 but eventually this should migrate to a pool_index (u32)
415 with a type specific pool that can include more complex things
416 such as timing and structured events.
418 vec_validate (registrations, STATS_REG_N_IDX);
419 #define stats_reg(n) \
420 vec_reset_length(registrations[IDX_##n]);
421 #include <vpp/stats/stats.reg>
425 When registrations[x] is a vector of pool indices, here
426 is a good place to init the pools.
428 return registrations;
/* Mark every stat type as "all items wanted" by appending the ~0
 * wildcard to each interest vector; returns the vector. */
432 enable_all_client_reg (u32 ** registrations)
436 Enable all stats known by adding
437 ~0 to the index vector. Eventually this
438 should be deprecated.
440 #define stats_reg(n) \
441 vec_add1(registrations[IDX_##n], ~0);
442 #include <vpp/stats/stats.reg>
444 return registrations;
/* Scrape every simple per-interface counter and ship them to the main
 * thread as VNET_INTERFACE_SIMPLE_COUNTERS messages, batched so each
 * message fits a 1024-byte API ring slot.
 * NOTE(review): loop braces, mp->count accumulation and the message
 * allocation condition are truncated in this excerpt. */
448 do_simple_interface_counters (stats_main_t * sm)
450 vl_api_vnet_interface_simple_counters_t *mp = 0;
451 vnet_interface_main_t *im = sm->interface_main;
452 api_main_t *am = sm->api_main;
453 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
454 svm_queue_t *q = shmem_hdr->vl_input_queue;
455 vlib_simple_counter_main_t *cm;
456 u32 items_this_message = 0;
461 * Prevent interface registration from expanding / moving the vectors...
462 * That tends never to happen, so we can hold this lock for a while.
464 vnet_interface_counter_lock (im);
466 vec_foreach (cm, im->sw_if_counters)
468 n_counts = vlib_simple_counter_n_counters (cm);
469 for (i = 0; i < n_counts; i++)
473 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
476 mp = vl_msg_api_alloc_as_if_client
477 (sizeof (*mp) + items_this_message * sizeof (v));
478 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
/* counter type is the offset into the sw_if_counters vector */
479 mp->vnet_counter_type = cm - im->sw_if_counters;
480 mp->first_sw_if_index = htonl (i);
482 vp = (u64 *) mp->data;
/* store each counter value unaligned, network byte order */
484 v = vlib_get_simple_counter (cm, i);
485 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
488 if (mp->count == items_this_message)
490 mp->count = htonl (items_this_message);
491 /* Send to the main thread... */
492 vl_msg_api_send_shmem (q, (u8 *) & mp);
498 vnet_interface_counter_unlock (im);
/* Enable or disable a client's interest in (stat, item). On disable,
 * warns if the client wasn't registered; on enable, records the
 * client and updates sm->enable_poller from set_client_for_stat(). */
502 handle_client_registration (vpe_client_registration_t * client, u32 stat,
503 u32 item, int enable_disable)
505 stats_main_t *sm = &stats_main;
506 vpe_client_registration_t *rp, _rp;
508 rp = get_client_for_stat (stat, item, client->client_index);
511 if (enable_disable == 0)
513 if (!rp) // No client to disable
515 clib_warning ("pid %d: already disabled for stats...",
520 clear_client_for_stat (stat, item, client->client_index);
/* enable path: (re)register this client */
527 rp->client_index = client->client_index;
528 rp->client_pid = client->client_pid;
529 sm->enable_poller = set_client_for_stat (stat, item, rp);
534 /**********************************
535 * ALL Interface Combined stats - to be deprecated
536 **********************************/
539 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/* Handler for WANT_INTERFACE_COMBINED_STATS: registers the caller for
 * combined counters on ALL interfaces (swif = ~0 wildcard). If the
 * client registration can no longer be resolved, the interest is
 * cleared; otherwise a reply with retval is sent back. */
542 vl_api_want_interface_combined_stats_t_handler
543 (vl_api_want_interface_combined_stats_t * mp)
545 stats_main_t *sm = &stats_main;
546 vpe_client_registration_t rp;
547 vl_api_want_interface_combined_stats_reply_t *rmp;
550 vl_api_registration_t *reg;
553 swif = ~0; //Using same mechanism as _per_interface_
554 rp.client_index = mp->client_index;
555 rp.client_pid = mp->pid;
557 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
561 reg = vl_api_client_index_to_registration (mp->client_index);
/* client vanished: undo the registration so we don't poll for nobody */
565 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
570 rmp = vl_msg_api_alloc (sizeof (*rmp));
571 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
572 rmp->context = mp->context;
573 rmp->retval = retval;
575 vl_api_send_msg (reg, (u8 *) rmp);
/* Fan a VNET_INTERFACE_COMBINED_COUNTERS message out to every client
 * registered for the ~0 (all interfaces) item. Uses the reg_prev /
 * mp_copy pattern: each client but the last gets a fresh copy, the
 * last consumer gets the original mp; if nobody consumed it the
 * message is freed (and optionally dumped to stdout). */
579 vl_api_vnet_interface_combined_counters_t_handler
580 (vl_api_vnet_interface_combined_counters_t * mp)
582 vpe_client_registration_t *clients, client;
583 stats_main_t *sm = &stats_main;
584 vl_api_registration_t *reg, *reg_prev = NULL;
585 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
589 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
592 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
593 ~0 /*flag for all */ );
595 for (i = 0; i < vec_len (clients); i++)
598 reg = vl_api_client_index_to_registration (client.client_index);
/* send the previous copy before making the next one */
601 if (reg_prev && vl_api_can_send_msg (reg_prev))
603 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
604 clib_memcpy (mp_copy, mp, mp_size);
605 vl_api_send_msg (reg_prev, (u8 *) mp);
612 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
/* last registered client receives the original message */
615 if (reg_prev && vl_api_can_send_msg (reg_prev))
617 vl_api_send_msg (reg_prev, (u8 *) mp);
621 vl_msg_api_free (mp);
/* Scrape every combined (packets+bytes) per-interface counter and ship
 * them to the main thread as VNET_INTERFACE_COMBINED_COUNTERS
 * messages, batched to fit the 1024-byte API ring slot.
 * NOTE(review): loop braces and mp->count accumulation are truncated
 * in this excerpt -- confirm against upstream. */
626 do_combined_interface_counters (stats_main_t * sm)
628 vl_api_vnet_interface_combined_counters_t *mp = 0;
629 vnet_interface_main_t *im = sm->interface_main;
630 api_main_t *am = sm->api_main;
631 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
632 svm_queue_t *q = shmem_hdr->vl_input_queue;
633 vlib_combined_counter_main_t *cm;
634 u32 items_this_message = 0;
635 vlib_counter_t v, *vp = 0;
638 vnet_interface_counter_lock (im);
640 vec_foreach (cm, im->combined_sw_if_counters)
642 n_counts = vlib_combined_counter_n_counters (cm);
643 for (i = 0; i < n_counts; i++)
647 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
650 mp = vl_msg_api_alloc_as_if_client
651 (sizeof (*mp) + items_this_message * sizeof (v));
652 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
/* counter type is the offset into the combined_sw_if_counters vector */
653 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
654 mp->first_sw_if_index = htonl (i);
656 vp = (vlib_counter_t *) mp->data;
/* store packets/bytes unaligned, network byte order */
658 vlib_get_combined_counter (cm, i, &v);
659 clib_mem_unaligned (&vp->packets, u64)
660 = clib_host_to_net_u64 (v.packets);
661 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
664 if (mp->count == items_this_message)
666 mp->count = htonl (items_this_message);
667 /* Send to the main thread... */
668 vl_msg_api_send_shmem (q, (u8 *) & mp);
674 vnet_interface_counter_unlock (im);
677 /**********************************
678 * Per Interface Combined stats
679 **********************************/
681 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_COMBINED_STATS: validates each
 * requested sw_if_index, then registers (or unregisters, per
 * enable_disable) the caller for each one; replies with retval.
 * Validation happens in a first pass so nothing is registered when
 * any index is bad. */
683 vl_api_want_per_interface_combined_stats_t_handler
684 (vl_api_want_per_interface_combined_stats_t * mp)
686 stats_main_t *sm = &stats_main;
687 vpe_client_registration_t rp;
688 vl_api_want_per_interface_combined_stats_reply_t *rmp;
689 vlib_combined_counter_main_t *cm;
692 vl_api_registration_t *reg;
693 u32 i, swif, num = 0;
695 num = ntohl (mp->num);
698 * Validate sw_if_indexes before registering
700 for (i = 0; i < num; i++)
702 swif = ntohl (mp->sw_ifs[i]);
705 * Check its a real sw_if_index that the client is allowed to see
709 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
711 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* second pass: actually (un)register each requested interface */
717 for (i = 0; i < num; i++)
719 swif = ntohl (mp->sw_ifs[i]);
721 rp.client_index = mp->client_index;
722 rp.client_pid = mp->pid;
723 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
724 swif, ntohl (mp->enable_disable));
728 reg = vl_api_client_index_to_registration (mp->client_index);
/* client vanished: undo every registration just made */
731 for (i = 0; i < num; i++)
733 swif = ntohl (mp->sw_ifs[i]);
736 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
742 rmp = vl_msg_api_alloc (sizeof (*rmp));
743 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
744 rmp->context = mp->context;
745 rmp->retval = retval;
747 vl_api_send_msg (reg, (u8 *) rmp);
750 /* Per Interface Combined distribution to client */
/* For every per-interface combined-counter registration, build a
 * one-entry VNET_PER_INTERFACE_COMBINED_COUNTERS message per client
 * and send it directly. A ~0 item is handled by temporarily dropping
 * the lock and delegating to the all-interfaces path. Registrations
 * and clients are snapshotted into temp vectors first because
 * clear_client_for_stat() may mutate the pools mid-iteration. */
752 do_combined_per_interface_counters (stats_main_t * sm)
754 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
755 vnet_interface_main_t *im = sm->interface_main;
756 api_main_t *am = sm->api_main;
757 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
758 vl_api_registration_t *vl_reg;
759 vlib_combined_counter_main_t *cm;
760 vl_api_vnet_combined_counter_t *vp = 0;
763 vpe_client_stats_registration_t *reg;
764 vpe_client_registration_t *client;
765 u32 *sw_if_index = 0;
767 vnet_interface_counter_lock (im);
769 vec_reset_length (sm->regs_tmp);
773 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
774 ({ vec_add1 (sm->regs_tmp, reg); }));
777 for (i = 0; i < vec_len (sm->regs_tmp); i++)
779 reg = sm->regs_tmp[i];
/* ~0 wildcard: service via the all-interfaces batch path instead */
782 vnet_interface_counter_unlock (im);
783 do_combined_interface_counters (sm);
784 vnet_interface_counter_lock (im);
787 vec_reset_length (sm->clients_tmp);
790 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
794 for (j = 0; j < vec_len (sm->clients_tmp); j++)
796 client = sm->clients_tmp[j];
798 vl_reg = vl_api_client_index_to_registration (client->client_index);
800 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
804 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
805 reg->item, client->client_index);
808 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
809 memset (mp, 0, sizeof (*mp));
812 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
815 * count will eventually be used to optimise the batching
816 * of per client messages for each stat. For now setting this to 1 then
817 * iterate. This will not affect API.
819 * FIXME instead of enqueueing here, this should be sent to a batch
820 * storer for per-client transmission. Each "mp" sent would be a single entry
821 * and if a client is listening to other sw_if_indexes for same, it would be
822 * appended to that *mp
826 * - capturing the timestamp of the counters "when VPP knew them" is important.
827 * Less so is that the timing of the delivery to the control plane be in the same
830 * i.e. As long as the control plane can delta messages from VPP and work out
831 * velocity etc based on the timestamp, it can do so in a more "batch mode".
833 * It would be beneficial to keep a "per-client" message queue, and then
834 * batch all the stat messages for a client into one message, with
835 * discrete timestamps.
837 * Given this particular API is for "per interface" one assumes that the scale
838 * is less than the ~0 case, which the prior API is suited for.
842 * 1 message per api call for now
844 mp->count = htonl (1);
845 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
847 vp = (vl_api_vnet_combined_counter_t *) mp->data;
848 vp->sw_if_index = htonl (reg->item);
850 im = &vnet_get_main ()->interface_main;
853 cm = im->combined_sw_if_counters + X; \
854 vlib_get_combined_counter (cm, reg->item, &v); \
855 clib_mem_unaligned (&vp->x##_packets, u64) = \
856 clib_host_to_net_u64 (v.packets); \
857 clib_mem_unaligned (&vp->x##_bytes, u64) = \
858 clib_host_to_net_u64 (v.bytes);
861 _(VNET_INTERFACE_COUNTER_RX, rx);
862 _(VNET_INTERFACE_COUNTER_TX, tx);
863 _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
864 _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
865 _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
866 _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
867 _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
868 _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
872 vl_api_send_msg (vl_reg, (u8 *) mp);
876 vnet_interface_counter_unlock (im);
879 /**********************************
880 * Per Interface simple stats
881 **********************************/
883 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_SIMPLE_STATS: validates each
 * requested sw_if_index, then (un)registers the caller per
 * enable_disable; mirrors the combined-stats handler above. */
885 vl_api_want_per_interface_simple_stats_t_handler
886 (vl_api_want_per_interface_simple_stats_t * mp)
888 stats_main_t *sm = &stats_main;
889 vpe_client_registration_t rp;
890 vl_api_want_per_interface_simple_stats_reply_t *rmp;
891 vlib_simple_counter_main_t *cm;
894 vl_api_registration_t *reg;
895 u32 i, swif, num = 0;
897 num = ntohl (mp->num);
/* first pass: validate before registering anything */
899 for (i = 0; i < num; i++)
901 swif = ntohl (mp->sw_ifs[i]);
903 /* Check its a real sw_if_index that the client is allowed to see */
906 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
908 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* second pass: (un)register each requested interface */
914 for (i = 0; i < num; i++)
916 swif = ntohl (mp->sw_ifs[i]);
918 rp.client_index = mp->client_index;
919 rp.client_pid = mp->pid;
920 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
921 swif, ntohl (mp->enable_disable));
925 reg = vl_api_client_index_to_registration (mp->client_index);
927 /* Client may have disconnected abruptly, clean up */
930 for (i = 0; i < num; i++)
932 swif = ntohl (mp->sw_ifs[i]);
934 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
942 rmp = vl_msg_api_alloc (sizeof (*rmp));
943 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
944 rmp->context = mp->context;
945 rmp->retval = retval;
947 vl_api_send_msg (reg, (u8 *) rmp);
950 /* Per Interface Simple distribution to client */
/* For every per-interface simple-counter registration, build a
 * one-entry VNET_PER_INTERFACE_SIMPLE_COUNTERS message per client
 * (drop/punt/ip4/ip6/rx-no-buf/rx-miss/rx-error/tx-error/mpls) and
 * send it directly. ~0 items are delegated to the all-interfaces
 * path with the lock temporarily dropped; registrations/clients are
 * snapshotted into temp vectors because cleanup may mutate pools. */
952 do_simple_per_interface_counters (stats_main_t * sm)
954 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
955 vnet_interface_main_t *im = sm->interface_main;
956 api_main_t *am = sm->api_main;
957 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
958 vl_api_registration_t *vl_reg;
959 vlib_simple_counter_main_t *cm;
961 vpe_client_stats_registration_t *reg;
962 vpe_client_registration_t *client;
963 u32 timestamp, count;
964 vl_api_vnet_simple_counter_t *vp = 0;
967 vnet_interface_counter_lock (im);
969 vec_reset_length (sm->regs_tmp);
973 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
974 ({ vec_add1 (sm->regs_tmp, reg); }));
977 for (i = 0; i < vec_len (sm->regs_tmp); i++)
979 reg = sm->regs_tmp[i];
/* ~0 wildcard: service via the all-interfaces batch path instead */
982 vnet_interface_counter_unlock (im);
983 do_simple_interface_counters (sm);
984 vnet_interface_counter_lock (im);
987 vec_reset_length (sm->clients_tmp);
990 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
994 for (j = 0; j < vec_len (sm->clients_tmp); j++)
996 client = sm->clients_tmp[j];
997 vl_reg = vl_api_client_index_to_registration (client->client_index);
999 /* Client may have disconnected abrubtly, clean up */
1003 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1004 reg->item, client->client_index);
1008 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1009 memset (mp, 0, sizeof (*mp));
1010 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1013 * count will eventually be used to optimise the batching
1014 * of per client messages for each stat. For now setting this to 1 then
1015 * iterate. This will not affect API.
1017 * FIXME instead of enqueueing here, this should be sent to a batch
1018 * storer for per-client transmission. Each "mp" sent would be a single entry
1019 * and if a client is listening to other sw_if_indexes for same, it would be
1020 * appended to that *mp
1024 * - capturing the timestamp of the counters "when VPP knew them" is important.
1025 * Less so is that the timing of the delivery to the control plane be in the same
1028 * i.e. As long as the control plane can delta messages from VPP and work out
1029 * velocity etc based on the timestamp, it can do so in a more "batch mode".
1031 * It would be beneficial to keep a "per-client" message queue, and then
1032 * batch all the stat messages for a client into one message, with
1033 * discrete timestamps.
1035 * Given this particular API is for "per interface" one assumes that the scale
1036 * is less than the ~0 case, which the prior API is suited for.
1040 * 1 message per api call for now
1042 mp->count = htonl (1);
1043 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1044 vp = (vl_api_vnet_simple_counter_t *) mp->data;
1046 vp->sw_if_index = htonl (reg->item);
/* one field per simple counter type; all unaligned, network order */
1048 // VNET_INTERFACE_COUNTER_DROP
1049 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1050 v = vlib_get_simple_counter (cm, reg->item);
1051 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1053 // VNET_INTERFACE_COUNTER_PUNT
1054 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1055 v = vlib_get_simple_counter (cm, reg->item);
1056 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1058 // VNET_INTERFACE_COUNTER_IP4
1059 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1060 v = vlib_get_simple_counter (cm, reg->item);
1061 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1063 //VNET_INTERFACE_COUNTER_IP6
1064 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1065 v = vlib_get_simple_counter (cm, reg->item);
1066 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1068 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1069 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1070 v = vlib_get_simple_counter (cm, reg->item);
1071 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1072 clib_host_to_net_u64 (v);
1074 //VNET_INTERFACE_COUNTER_RX_MISS
1075 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1076 v = vlib_get_simple_counter (cm, reg->item);
1077 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1079 //VNET_INTERFACE_COUNTER_RX_ERROR
1080 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1081 v = vlib_get_simple_counter (cm, reg->item);
1082 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1084 //VNET_INTERFACE_COUNTER_TX_ERROR
1085 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1086 v = vlib_get_simple_counter (cm, reg->item);
1087 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1089 //VNET_INTERFACE_COUNTER_MPLS
1090 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1091 v = vlib_get_simple_counter (cm, reg->item);
1092 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1094 vl_api_send_msg (vl_reg, (u8 *) mp);
1098 vnet_interface_counter_unlock (im);
1101 /**********************************
1103 **********************************/
/* Sleep for (sec, nsec) via nanosleep(); warns on failure. Used to
 * back off when a client's shared-memory queue is full.
 * NOTE(review): the req->tv_sec assignment and EINTR-retry handling
 * appear truncated in this excerpt. */
1106 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1108 struct timespec _req, *req = &_req;
1109 struct timespec _rem, *rem = &_rem;
1112 req->tv_nsec = nsec;
1115 if (nanosleep (req, rem) == 0)
1120 clib_unix_warning ("nanosleep");
1126 * @brief The context passed when collecting adjacency counters
1128 typedef struct ip4_nbr_stats_ctx_t_
1131 * The SW IF index all these adjs belong to
1136 * A vector of ip4 nbr counters
/* accumulated per-adjacency counters, appended by ip4_nbr_stats_cb */
1138 vl_api_ip4_nbr_counter_t *counters;
1139 } ip4_nbr_stats_ctx_t;
/* adj_nbr_walk callback: for each IPv4 neighbour adjacency with a
 * non-zero packet count, append a counter record (host->net byte
 * order) to the walk context's vector. Always continues the walk. */
1141 static adj_walk_rc_t
1142 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1144 vl_api_ip4_nbr_counter_t *vl_counter;
1145 vlib_counter_t adj_counter;
1146 ip4_nbr_stats_ctx_t *ctx;
1147 ip_adjacency_t *adj;
1150 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* skip idle adjacencies to keep the shipped vector small */
1152 if (0 != adj_counter.packets)
1154 vec_add2 (ctx->counters, vl_counter, 1);
1157 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1158 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1159 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1160 vl_counter->link_type = adj->ia_link;
1162 return (ADJ_WALK_RC_CONTINUE);
1165 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain ctx->counters to the main thread as VNET_IP4_NBR_COUNTERS
 * messages, IP4_FIB_COUNTER_BATCH_SIZE records at a time. Counters
 * are copied from the BACK of the vector so erasing is a length
 * reset. If the queue fills, sleeps STATS_RELEASE_DELAY_NS between
 * batches to let the consumer catch up. */
1168 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1170 api_main_t *am = sm->api_main;
1171 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1172 svm_queue_t *q = shmem_hdr->vl_input_queue;
1173 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1177 * If the walk context has counters, which may be left over from the last
1178 * suspend, then we continue from there.
1180 while (0 != vec_len (ctx->counters))
1182 u32 n_items = MIN (vec_len (ctx->counters),
1183 IP4_FIB_COUNTER_BATCH_SIZE);
1186 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1188 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1191 (vl_api_ip4_nbr_counter_t)));
1192 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1193 mp->count = ntohl (n_items);
1194 mp->sw_if_index = ntohl (ctx->sw_if_index);
1199 * copy the counters from the back of the context, then we can easily
1200 * 'erase' them by resetting the vector length.
1201 * The order we push the stats to the caller is not important.
1204 &ctx->counters[vec_len (ctx->counters) - n_items],
1205 n_items * sizeof (*ctx->counters));
1207 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* note queue pressure before sending so we know whether to pause */
1213 pause = svm_queue_is_full (q);
1215 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1216 svm_queue_unlock (q);
1220 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1221 STATS_RELEASE_DELAY_NS);
/* For each software interface, walk its IPv4 neighbour adjacencies
 * under the stats lock collecting counters into a shared ctx, then
 * ship any collected counters before moving to the next interface. */
1226 do_ip4_nbr_counters (stats_main_t * sm)
1228 vnet_main_t *vnm = vnet_get_main ();
1229 vnet_interface_main_t *im = &vnm->interface_main;
1230 vnet_sw_interface_t *si;
1232 ip4_nbr_stats_ctx_t ctx = {
1238 pool_foreach (si, im->sw_interfaces,
1241 * update the interface we are now concerned with
1243 ctx.sw_if_index = si->sw_if_index;
1246 * we are about to walk another interface, so we shouldn't have any pending
1249 ASSERT(ctx.counters == NULL);
1252 * visit each neighbour adjacency on the interface and collect
1253 * its current stats.
1254 * Because we hold the lock the walk is synchronous, so safe to routing
1255 * updates. It's limited in work by the number of adjacenies on an
1256 * interface, which is typically not huge.
1258 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1259 adj_nbr_walk (si->sw_if_index,
1266 * if this interface has some adjacencies with counters then ship them,
1267 * else continue to the next interface.
1269 if (NULL != ctx.counters)
1271 ip4_nbr_ship(sm, &ctx);
1278 * @brief The context passed when collecting adjacency counters
1280 typedef struct ip6_nbr_stats_ctx_t_
1283 * The SW IF index all these adjs belong to
1288 * A vector of ip6 nbr counters
/* accumulated per-adjacency counters, appended by ip6_nbr_stats_cb */
1290 vl_api_ip6_nbr_counter_t *counters;
1291 } ip6_nbr_stats_ctx_t;
/* adj_nbr_walk callback: IPv6 twin of ip4_nbr_stats_cb -- appends a
 * counter record (two u64 address words) for each neighbour adjacency
 * with non-zero packets. Always continues the walk. */
1293 static adj_walk_rc_t
1294 ip6_nbr_stats_cb (adj_index_t ai,
1297 vl_api_ip6_nbr_counter_t *vl_counter;
1298 vlib_counter_t adj_counter;
1299 ip6_nbr_stats_ctx_t *ctx;
1300 ip_adjacency_t *adj;
1303 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* skip idle adjacencies */
1305 if (0 != adj_counter.packets)
1307 vec_add2(ctx->counters, vl_counter, 1);
1310 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1311 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1312 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1313 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1314 vl_counter->link_type = adj->ia_link;
1316 return (ADJ_WALK_RC_CONTINUE);
/* NOTE(review): MIN is defined identically twice in this file (see the
 * ip4 section); harmless but a candidate for consolidation. */
1319 #define MIN(x,y) (((x)<(y))?(x):(y))
/* IPv6 twin of ip4_nbr_ship: drain ctx->counters to the main thread
 * as VNET_IP6_NBR_COUNTERS messages in IP6_FIB_COUNTER_BATCH_SIZE
 * chunks, pausing STATS_RELEASE_DELAY_NS when the queue is full. */
1322 ip6_nbr_ship (stats_main_t * sm,
1323 ip6_nbr_stats_ctx_t *ctx)
1325 api_main_t *am = sm->api_main;
1326 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1327 svm_queue_t *q = shmem_hdr->vl_input_queue;
1328 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1332 * If the walk context has counters, which may be left over from the last
1333 * suspend, then we continue from there.
1335 while (0 != vec_len(ctx->counters))
1337 u32 n_items = MIN (vec_len (ctx->counters),
1338 IP6_FIB_COUNTER_BATCH_SIZE);
1341 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1343 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1346 (vl_api_ip6_nbr_counter_t)));
1347 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1348 mp->count = ntohl (n_items);
1349 mp->sw_if_index = ntohl (ctx->sw_if_index);
1354 * copy the counters from the back of the context, then we can easily
1355 * 'erase' them by resetting the vector length.
1356 * The order we push the stats to the caller is not important.
1359 &ctx->counters[vec_len (ctx->counters) - n_items],
1360 n_items * sizeof (*ctx->counters));
1362 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* note queue pressure before sending so we know whether to pause */
1368 pause = svm_queue_is_full (q);
1370 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1371 svm_queue_unlock (q);
1375 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1376 STATS_RELEASE_DELAY_NS);
/*
 * For every software interface: walk its ip6 neighbour adjacencies under
 * the data-structure lock (collecting counters via ip6_nbr_stats_cb into
 * the walk context), then ship any collected entries via ip6_nbr_ship.
 * Mirrors do_ip4_nbr_counters above.
 */
1381 do_ip6_nbr_counters (stats_main_t * sm)
1383 vnet_main_t *vnm = vnet_get_main ();
1384 vnet_interface_main_t *im = &vnm->interface_main;
1385 vnet_sw_interface_t *si;
1387 ip6_nbr_stats_ctx_t ctx = {
1393 pool_foreach (si, im->sw_interfaces,
1396 * update the interface we are now concerned with
1398 ctx.sw_if_index = si->sw_if_index;
1401 * we are about to walk another interface, so we shouldn't have any pending
1404 ASSERT(ctx.counters == NULL);
1407 * visit each neighbour adjacency on the interface and collect
1408 * its current stats.
1409 * Because we hold the lock the walk is synchronous, so safe to routing
1410 * updates. It's limited in work by the number of adjacenies on an
1411 * interface, which is typically not huge.
1413 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1414 adj_nbr_walk (si->sw_if_index,
1421 * if this interface has some adjacencies with counters then ship them,
1422 * else continue to the next interface.
1424 if (NULL != ctx.counters)
1426 ip6_nbr_ship(sm, &ctx);
/*
 * Collect and ship per-route ip4 FIB counters.  Snapshots the FIB pool
 * into do_fibs->fibs, then for each FIB: gathers its routes from the
 * per-prefix-length hash tables under the data-structure lock, and
 * streams load-balance counters to the main-thread API queue in batches
 * of IP4_FIB_COUNTER_BATCH_SIZE.  If the main thread signals
 * release_hint, the current FIB index is remembered (start_at_fib_index),
 * the lock is dropped for STATS_RELEASE_DELAY_NS, and the in-flight
 * message is restarted (ctrp reset to mp->c).
 */
1433 do_ip4_fib_counters (stats_main_t * sm)
1435 ip4_main_t *im4 = &ip4_main;
1436 api_main_t *am = sm->api_main;
1437 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1438 svm_queue_t *q = shmem_hdr->vl_input_queue;
1442 do_ip46_fibs_t *do_fibs;
1443 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1444 u32 items_this_message;
1445 vl_api_ip4_fib_counter_t *ctrp = 0;
1446 u32 start_at_fib_index = 0;
1449 do_fibs = &sm->do_ip46_fibs;
1452 vec_reset_length (do_fibs->fibs);
1454 pool_foreach (fib, im4->fibs,
1455 ({vec_add1(do_fibs->fibs,fib);}));
1459 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1461 fib = do_fibs->fibs[j];
1462 /* We may have bailed out due to control-plane activity */
1463 while ((fib - im4->fibs) < start_at_fib_index)
1466 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1470 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1471 mp = vl_msg_api_alloc_as_if_client
1473 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1474 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1476 mp->vrf_id = ntohl (fib->ft_table_id);
1477 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1481 /* happens if the last FIB was empty... */
1482 ASSERT (mp->count == 0);
1483 mp->vrf_id = ntohl (fib->ft_table_id);
1486 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1488 vec_reset_length (do_fibs->ip4routes);
1489 vec_reset_length (do_fibs->results);
/* one hash table per prefix length; i doubles as the address length */
1491 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1493 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1497 vec_reset_length (do_fibs->pvec);
1499 x.address_length = i;
1501 hash_foreach_pair (p, hash, (
1503 vec_add1 (do_fibs->pvec, p);}
1505 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1507 p = do_fibs->pvec[k];
1508 x.address.data_u32 = p->key;
1509 x.index = p->value[0];
1511 vec_add1 (do_fibs->ip4routes, x);
1512 if (sm->data_structure_lock->release_hint)
1514 start_at_fib_index = fib - im4->fibs;
1516 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1517 STATS_RELEASE_DELAY_NS);
1519 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1525 vec_foreach (r, do_fibs->ip4routes)
1528 const dpo_id_t *dpo_id;
1531 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1532 index = (u32) dpo_id->dpoi_index;
1534 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1537 * If it has actually
1538 * seen at least one packet, send it.
1543 /* already in net byte order */
1544 ctrp->address = r->address.as_u32;
1545 ctrp->address_length = r->address_length;
1546 ctrp->packets = clib_host_to_net_u64 (c.packets);
1547 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1551 if (mp->count == items_this_message)
1553 mp->count = htonl (items_this_message);
1555 * If the main thread's input queue is stuffed,
1556 * drop the data structure lock (which the main thread
1557 * may want), and take a pause.
1560 if (svm_queue_is_full (q))
1563 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1564 svm_queue_unlock (q);
1566 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1567 STATS_RELEASE_DELAY_NS);
1570 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1571 svm_queue_unlock (q);
1573 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1574 mp = vl_msg_api_alloc_as_if_client
1576 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1577 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1579 mp->vrf_id = ntohl (fib->ft_table_id);
1580 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1582 } /* for each (mp or single) adj */
1583 if (sm->data_structure_lock->release_hint)
1585 start_at_fib_index = fib - im4->fibs;
1587 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1589 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1592 } /* vec_foreach (routes) */
1596 /* Flush any data from this fib */
1599 mp->count = htonl (mp->count);
1600 vl_msg_api_send_shmem (q, (u8 *) & mp);
1605 /* If e.g. the last FIB had no reportable routes, free the buffer */
1607 vl_msg_api_free (mp);
/*
 * mfib_table_walk() callback: record the entry's prefix in
 * sm->do_ip46_fibs.mroutes so counters can be collected after the walk
 * (the entry itself is re-looked-up later, since collection may suspend).
 */
1611 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1613 stats_main_t *sm = ctx;
1614 do_ip46_fibs_t *do_fibs;
1615 mfib_entry_t *entry;
1617 do_fibs = &sm->do_ip46_fibs;
1618 entry = mfib_entry_get (fei);
1620 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/*
 * Collect and ship ip4 multicast-FIB counters.  Per MFIB: walk the table
 * under the data-structure lock to snapshot prefixes, then for each
 * prefix re-lookup the entry (collection may have suspended meanwhile),
 * read its replicate counters, and stream batches of
 * IP4_MFIB_COUNTER_BATCH_SIZE entries to the main-thread API queue.
 */
1626 do_ip4_mfib_counters (stats_main_t * sm)
1628 ip4_main_t *im4 = &ip4_main;
1629 api_main_t *am = sm->api_main;
1630 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1631 svm_queue_t *q = shmem_hdr->vl_input_queue;
1634 do_ip46_fibs_t *do_fibs;
1635 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1636 u32 items_this_message;
1637 vl_api_ip4_mfib_counter_t *ctrp = 0;
1638 u32 start_at_mfib_index = 0;
1641 do_fibs = &sm->do_ip46_fibs;
1643 vec_reset_length (do_fibs->mfibs);
1645 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1648 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1650 mfib = do_fibs->mfibs[j];
1651 /* We may have bailed out due to control-plane activity */
1652 while ((mfib - im4->mfibs) < start_at_mfib_index)
1657 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1658 mp = vl_msg_api_alloc_as_if_client
1660 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1661 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1663 mp->vrf_id = ntohl (mfib->mft_table_id);
1664 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1668 /* happens if the last MFIB was empty... */
1669 ASSERT (mp->count == 0);
1670 mp->vrf_id = ntohl (mfib->mft_table_id);
1673 vec_reset_length (do_fibs->mroutes);
1676 * walk the table with table updates blocked
1678 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1680 mfib_table_walk (mfib->mft_index,
1681 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1684 vec_foreach (pfx, do_fibs->mroutes)
1686 const dpo_id_t *dpo_id;
1687 fib_node_index_t mfei;
1692 * re-lookup the entry, since we suspend during the collection
1694 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1696 if (FIB_NODE_INDEX_INVALID == mfei)
1699 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1700 index = (u32) dpo_id->dpoi_index;
1702 vlib_get_combined_counter (&replicate_main.repm_counters,
1703 dpo_id->dpoi_index, &c);
1705 * If it has seen at least one packet, send it.
1709 /* already in net byte order */
1710 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1711 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1712 ctrp->group_length = pfx->fp_len;
1713 ctrp->packets = clib_host_to_net_u64 (c.packets);
1714 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1718 if (mp->count == items_this_message)
1720 mp->count = htonl (items_this_message);
1722 * If the main thread's input queue is stuffed,
1723 * drop the data structure lock (which the main thread
1724 * may want), and take a pause.
1728 while (svm_queue_is_full (q))
1730 svm_queue_unlock (q);
1731 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1732 STATS_RELEASE_DELAY_NS);
1735 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1736 svm_queue_unlock (q);
1738 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1739 mp = vl_msg_api_alloc_as_if_client
1741 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1742 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1744 mp->vrf_id = ntohl (mfib->mft_table_id);
1745 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1750 /* Flush any data from this mfib */
1753 mp->count = htonl (mp->count);
1754 vl_msg_api_send_shmem (q, (u8 *) & mp);
1759 /* If e.g. the last FIB had no reportable routes, free the buffer */
1761 vl_msg_api_free (mp);
/*
 * Collect and ship ip6 multicast-FIB counters.  Structure is identical
 * to do_ip4_mfib_counters, differing only in table (im6->mfibs),
 * protocol (FIB_PROTOCOL_IP6), address width (16 bytes) and message/
 * batch-size constants.
 */
1765 do_ip6_mfib_counters (stats_main_t * sm)
1767 ip6_main_t *im6 = &ip6_main;
1768 api_main_t *am = sm->api_main;
1769 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1770 svm_queue_t *q = shmem_hdr->vl_input_queue;
1773 do_ip46_fibs_t *do_fibs;
1774 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1775 u32 items_this_message;
1776 vl_api_ip6_mfib_counter_t *ctrp = 0;
1777 u32 start_at_mfib_index = 0;
1780 do_fibs = &sm->do_ip46_fibs;
1782 vec_reset_length (do_fibs->mfibs);
1784 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1787 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1789 mfib = do_fibs->mfibs[j];
1790 /* We may have bailed out due to control-plane activity */
1791 while ((mfib - im6->mfibs) < start_at_mfib_index)
1796 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1797 mp = vl_msg_api_alloc_as_if_client
1799 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1800 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1802 mp->vrf_id = ntohl (mfib->mft_table_id);
1803 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1807 /* happens if the last MFIB was empty... */
1808 ASSERT (mp->count == 0);
1809 mp->vrf_id = ntohl (mfib->mft_table_id);
1812 vec_reset_length (do_fibs->mroutes);
1815 * walk the table with table updates blocked
1817 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1819 mfib_table_walk (mfib->mft_index,
1820 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1823 vec_foreach (pfx, do_fibs->mroutes)
1825 const dpo_id_t *dpo_id;
1826 fib_node_index_t mfei;
1831 * re-lookup the entry, since we suspend during the collection
1833 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1835 if (FIB_NODE_INDEX_INVALID == mfei)
1838 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1839 index = (u32) dpo_id->dpoi_index;
1841 vlib_get_combined_counter (&replicate_main.repm_counters,
1842 dpo_id->dpoi_index, &c);
1844 * If it has seen at least one packet, send it.
1848 /* already in net byte order */
1849 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1850 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1851 ctrp->group_length = pfx->fp_len;
1852 ctrp->packets = clib_host_to_net_u64 (c.packets);
1853 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1857 if (mp->count == items_this_message)
1859 mp->count = htonl (items_this_message);
1861 * If the main thread's input queue is stuffed,
1862 * drop the data structure lock (which the main thread
1863 * may want), and take a pause.
1867 while (svm_queue_is_full (q))
1869 svm_queue_unlock (q);
1870 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1871 STATS_RELEASE_DELAY_NS);
1874 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1875 svm_queue_unlock (q);
1877 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1878 mp = vl_msg_api_alloc_as_if_client
1880 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1881 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1883 mp->vrf_id = ntohl (mfib->mft_table_id);
1884 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1889 /* Flush any data from this mfib */
1892 mp->count = htonl (mp->count);
1893 vl_msg_api_send_shmem (q, (u8 *) & mp);
1898 /* If e.g. the last FIB had no reportable routes, free the buffer */
1900 vl_msg_api_free (mp);
/* Argument block for add_routes_in_fib(): destination route vector plus
 * (per the callback body) the owning stats_main and the fib index filter. */
1906 ip6_route_t **routep;
1908 } add_routes_in_fib_arg_t;
/*
 * Forwarding-bihash walk callback: for keys belonging to ap->fib_index,
 * append (address, prefix length, load-balance index) to the caller's
 * route vector.  If the main thread wants the data-structure lock,
 * longjmp back to the setjmp in do_ip6_fib_counters to abort the walk.
 */
1911 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1913 add_routes_in_fib_arg_t *ap = arg;
1914 stats_main_t *sm = ap->sm;
1916 if (sm->data_structure_lock->release_hint)
1917 clib_longjmp (&sm->jmp_buf, 1);
/* key layout: leading bytes are the ip6 address; key[2] packs
 * (fib_index << 32 | prefix_len) — see the extractions below */
1919 if (kvp->key[2] >> 32 == ap->fib_index)
1921 ip6_address_t *addr;
1923 addr = (ip6_address_t *) kvp;
1924 vec_add2 (*ap->routep, r, 1);
1925 r->address = addr[0];
1926 r->address_length = kvp->key[2] & 0xFF;
1927 r->index = kvp->value;
/*
 * Collect and ship per-route ip6 FIB counters.  Unlike the ip4 variant,
 * routes are gathered by walking the forwarding bihash with
 * add_routes_in_fib() under the data-structure lock, using
 * clib_setjmp/clib_longjmp to abort and retry the current FIB when the
 * main thread requests the lock.  Batches of IP6_FIB_COUNTER_BATCH_SIZE
 * are streamed to the main-thread API queue.
 */
1932 do_ip6_fib_counters (stats_main_t * sm)
1934 ip6_main_t *im6 = &ip6_main;
1935 api_main_t *am = sm->api_main;
1936 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1937 svm_queue_t *q = shmem_hdr->vl_input_queue;
1940 do_ip46_fibs_t *do_fibs;
1941 vl_api_vnet_ip6_fib_counters_t *mp = 0;
1942 u32 items_this_message;
1943 vl_api_ip6_fib_counter_t *ctrp = 0;
1944 u32 start_at_fib_index = 0;
1945 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1946 add_routes_in_fib_arg_t _a, *a = &_a;
1949 do_fibs = &sm->do_ip46_fibs;
1951 vec_reset_length (do_fibs->fibs);
1953 pool_foreach (fib, im6->fibs,
1954 ({vec_add1(do_fibs->fibs,fib);}));
1958 for (i = 0; i < vec_len (do_fibs->fibs); i++)
1960 fib = do_fibs->fibs[i];
1961 /* We may have bailed out due to control-plane activity */
1962 while ((fib - im6->fibs) < start_at_fib_index)
1967 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1968 mp = vl_msg_api_alloc_as_if_client
1970 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1971 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1973 mp->vrf_id = ntohl (fib->ft_table_id);
1974 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1977 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1979 vec_reset_length (do_fibs->ip6routes);
1980 vec_reset_length (do_fibs->results);
1982 a->fib_index = fib - im6->fibs;
1983 a->routep = &do_fibs->ip6routes;
/* setjmp returns 0 on the initial call; non-zero when add_routes_in_fib
 * longjmps back because the main thread wants the lock */
1986 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1988 start_at_fib_index = fib - im6->fibs;
1989 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
1994 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1995 STATS_RELEASE_DELAY_NS);
1997 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2001 vec_foreach (r, do_fibs->ip6routes)
2005 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2008 * If it has actually
2009 * seen at least one packet, send it.
2013 /* already in net byte order */
2014 ctrp->address[0] = r->address.as_u64[0];
2015 ctrp->address[1] = r->address.as_u64[1];
2016 ctrp->address_length = (u8) r->address_length;
2017 ctrp->packets = clib_host_to_net_u64 (c.packets);
2018 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2022 if (mp->count == items_this_message)
2024 mp->count = htonl (items_this_message);
2026 * If the main thread's input queue is stuffed,
2027 * drop the data structure lock (which the main thread
2028 * may want), and take a pause.
2031 if (svm_queue_is_full (q))
2034 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2035 svm_queue_unlock (q);
2037 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2038 STATS_RELEASE_DELAY_NS);
2041 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2042 svm_queue_unlock (q);
2044 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2045 mp = vl_msg_api_alloc_as_if_client
2047 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2048 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2050 mp->vrf_id = ntohl (fib->ft_table_id);
2051 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2055 if (sm->data_structure_lock->release_hint)
2057 start_at_fib_index = fib - im6->fibs;
2059 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2061 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2064 } /* vec_foreach (routes) */
2068 /* Flush any data from this fib */
2071 mp->count = htonl (mp->count);
2072 vl_msg_api_send_shmem (q, (u8 *) & mp);
2077 /* If e.g. the last FIB had no reportable routes, free the buffer */
2079 vl_msg_api_free (mp);
/* One per-encap counter sample, and the walk context that accumulates a
 * vector of them (drained by udp_encap_ship). */
2082 typedef struct udp_encap_stat_t_
2088 typedef struct udp_encap_stats_walk_t_
2090 udp_encap_stat_t *stats;
2091 } udp_encap_stats_walk_t;
/*
 * udp_encap_walk() callback: append one stat entry for encap object
 * 'uei', filling stats[0]/stats[1] via udp_encap_get_stats (presumably
 * packets and bytes — confirm against the udp_encap API).
 */
2094 udp_encap_stats_walk_cb (index_t uei, void *arg)
2096 udp_encap_stats_walk_t *ctx = arg;
2097 udp_encap_stat_t *stat;
2100 ue = udp_encap_get (uei);
2101 vec_add2 (ctx->stats, stat, 1);
2104 udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
/*
 * Ship accumulated UDP-encap counters to the main-thread API queue in
 * batches of at most UDP_ENCAP_COUNTER_BATCH_SIZE, draining the context
 * vector from the back (same pattern as ip6_nbr_ship); pauses with the
 * lock released when the queue is full.
 */
2110 udp_encap_ship (udp_encap_stats_walk_t * ctx)
2112 vl_api_vnet_udp_encap_counters_t *mp;
2113 vl_shmem_hdr_t *shmem_hdr;
2121 shmem_hdr = am->shmem_hdr;
2122 q = shmem_hdr->vl_input_queue;
2125 * If the walk context has counters, which may be left over from the last
2126 * suspend, then we continue from there.
2128 while (0 != vec_len (ctx->stats))
2130 u32 n_items = MIN (vec_len (ctx->stats),
2131 UDP_ENCAP_COUNTER_BATCH_SIZE);
2134 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2136 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2139 (vl_api_udp_encap_counter_t)));
2140 mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2141 mp->count = ntohl (n_items);
2144 * copy the counters from the back of the context, then we can easily
2145 * 'erase' them by resetting the vector length.
2146 * The order we push the stats to the caller is not important.
2149 &ctx->stats[vec_len (ctx->stats) - n_items],
2150 n_items * sizeof (*ctx->stats));
2152 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
/* check fullness before sending, since the send may fill the queue */
2158 pause = svm_queue_is_full (q);
2160 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2161 svm_queue_unlock (q);
2165 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2166 STATS_RELEASE_DELAY_NS);
/* Walk every UDP-encap object under the data-structure lock, collecting
 * counters into a local context, then ship them via udp_encap_ship. */
2171 do_udp_encap_counters (stats_main_t * sm)
2173 udp_encap_stat_t *stat;
2175 udp_encap_stats_walk_t ctx = {
2179 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2180 udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
2183 udp_encap_ship (&ctx);
/*
 * Set the stats poller interval in seconds.  Zero is rejected with
 * VNET_API_ERROR_INVALID_ARGUMENT (the poller needs a positive period).
 */
2187 stats_set_poller_delay (u32 poller_delay_sec)
2189 stats_main_t *sm = &stats_main;
2190 if (!poller_delay_sec)
2192 return VNET_API_ERROR_INVALID_ARGUMENT;
2196 sm->stats_poll_interval_in_seconds = poller_delay_sec;
/*
 * Startup-config handler for the "stats" section: accepts
 * "interval <seconds>" and applies it via stats_set_poller_delay();
 * any other token is a config error.
 */
2201 static clib_error_t *
2202 stats_config (vlib_main_t * vm, unformat_input_t * input)
2206 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2208 if (unformat (input, "interval %u", &sec))
2210 int rv = stats_set_poller_delay (sec);
2213 return clib_error_return (0,
2214 "`stats_set_poller_delay' API call failed, rv=%d:%U",
2215 (int) rv, format_vnet_api_errno, rv);
2221 return clib_error_return (0, "unknown input '%U'",
2222 format_unformat_error, input);
/* Register stats_config as the handler for the "stats { ... }" startup
 * configuration section. */
2228 /* stats { ... } configuration. */
2231 * @cfgcmd{interval, <seconds>}
2232 * Configure stats poller delay to be @c seconds.
2235 VLIB_CONFIG_FUNCTION (stats_config, "stats");
/*
 * API handler: reply with the currently-configured poller interval
 * (seconds, network byte order) to the requesting client.
 */
2238 vl_api_stats_get_poller_delay_t_handler
2239 (vl_api_stats_get_poller_delay_t * mp)
2241 stats_main_t *sm = &stats_main;
2242 vl_api_registration_t *reg;
2243 reg = vl_api_client_index_to_registration (mp->client_index);
2246 vl_api_stats_get_poller_delay_reply_t *rmp;
2248 rmp = vl_msg_api_alloc (sizeof (*rmp));
/* NOTE(review): reply message id is WANT_PER_INTERFACE_SIMPLE_STATS_REPLY
 * but the reply struct is stats_get_poller_delay_reply; expected
 * VL_API_STATS_GET_POLLER_DELAY_REPLY — confirm against vpe.api and fix. */
2249 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
2250 rmp->context = mp->context;
2252 rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2254 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Stats pthread entry point: blocks all signals, names the thread
 * "<prefix>_stats", switches to the worker heap, then loops forever —
 * sleeping stats_poll_interval_in_seconds per iteration and running each
 * collector that has at least one registered client (gated first by
 * sm->enable_poller).
 */
2259 stats_thread_fn (void *arg)
2261 stats_main_t *sm = &stats_main;
2262 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2263 vlib_thread_main_t *tm = vlib_get_thread_main ();
2265 /* stats thread wants no signals. */
2269 pthread_sigmask (SIG_SETMASK, &s, 0);
2272 if (vec_len (tm->thread_prefix))
2273 vlib_set_thread_name ((char *)
2274 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2276 clib_mem_set_heap (w->thread_mheap);
2280 ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
2283 if (!(sm->enable_poller))
2288 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2289 do_combined_per_interface_counters (sm);
2292 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2293 do_simple_per_interface_counters (sm);
2295 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2296 do_ip4_fib_counters (sm);
2298 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2299 do_ip6_fib_counters (sm);
2301 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2302 do_ip4_mfib_counters (sm);
2304 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2305 do_ip6_mfib_counters (sm);
2307 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2308 do_ip4_nbr_counters (sm);
2310 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2311 do_ip6_nbr_counters (sm);
2313 if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
2314 do_udp_encap_counters (sm);
/*
 * Fan a simple-counters message out to all registered clients using the
 * copy-on-next-client (reg_prev) pattern: the message is duplicated for
 * every reachable client except the last, which receives the original.
 * Unreachable clients are deregistered; if no client received the
 * message it is freed.  Also dumps the counters to stdout via fformat.
 */
2319 vl_api_vnet_interface_simple_counters_t_handler
2320 (vl_api_vnet_interface_simple_counters_t * mp)
2322 vpe_client_registration_t *clients, client;
2323 stats_main_t *sm = &stats_main;
2324 vl_api_registration_t *reg, *reg_prev = NULL;
2325 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2329 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2332 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2333 ~0 /*flag for all */ );
2335 for (i = 0; i < vec_len (clients); i++)
2337 client = clients[i];
2338 reg = vl_api_client_index_to_registration (client.client_index);
2341 if (reg_prev && vl_api_can_send_msg (reg_prev))
2343 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2344 clib_memcpy (mp_copy, mp, mp_size);
2345 vl_api_send_msg (reg_prev, (u8 *) mp);
2353 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2354 client.client_index);
2360 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2363 if (reg_prev && vl_api_can_send_msg (reg_prev))
2365 vl_api_send_msg (reg_prev, (u8 *) mp);
2369 vl_msg_api_free (mp);
/*
 * Fan an ip4 FIB counters message out to registered clients (same
 * copy/reg_prev pattern as the simple-counters handler); dropped clients
 * are cleared from the registration and sm->enable_poller is updated.
 */
2374 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2376 stats_main_t *sm = &stats_main;
2377 vl_api_registration_t *reg, *reg_prev = NULL;
2378 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2380 vpe_client_registration_t *clients, client;
2383 mp_size = sizeof (*mp_copy) +
2384 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2387 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2389 for (i = 0; i < vec_len (clients); i++)
2391 client = clients[i];
2392 reg = vl_api_client_index_to_registration (client.client_index);
2395 if (reg_prev && vl_api_can_send_msg (reg_prev))
2397 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2398 clib_memcpy (mp_copy, mp, mp_size);
2399 vl_api_send_msg (reg_prev, (u8 *) mp);
2406 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2407 ~0, client.client_index);
2412 if (reg_prev && vl_api_can_send_msg (reg_prev))
2414 vl_api_send_msg (reg_prev, (u8 *) mp);
2418 vl_msg_api_free (mp);
/*
 * Fan an ip4 neighbour-counters message out to registered clients
 * (copy/reg_prev pattern); unreachable clients are deregistered and
 * sm->enable_poller refreshed.
 */
2423 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2425 stats_main_t *sm = &stats_main;
2426 vl_api_registration_t *reg, *reg_prev = NULL;
2427 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2429 vpe_client_registration_t *clients, client;
2432 mp_size = sizeof (*mp_copy) +
2433 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2436 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2438 for (i = 0; i < vec_len (clients); i++)
2440 client = clients[i];
2441 reg = vl_api_client_index_to_registration (client.client_index);
2444 if (reg_prev && vl_api_can_send_msg (reg_prev))
2446 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2447 clib_memcpy (mp_copy, mp, mp_size);
2448 vl_api_send_msg (reg_prev, (u8 *) mp);
2455 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2456 ~0, client.client_index);
2462 if (reg_prev && vl_api_can_send_msg (reg_prev))
2464 vl_api_send_msg (reg_prev, (u8 *) mp);
2468 vl_msg_api_free (mp);
/*
 * Fan an ip6 FIB counters message out to registered clients
 * (copy/reg_prev pattern, identical in structure to the ip4 handler).
 */
2473 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2475 stats_main_t *sm = &stats_main;
2476 vl_api_registration_t *reg, *reg_prev = NULL;
2477 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2479 vpe_client_registration_t *clients, client;
2482 mp_size = sizeof (*mp_copy) +
2483 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2486 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2488 for (i = 0; i < vec_len (clients); i++)
2490 client = clients[i];
2491 reg = vl_api_client_index_to_registration (client.client_index);
2494 if (reg_prev && vl_api_can_send_msg (reg_prev))
2496 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2497 clib_memcpy (mp_copy, mp, mp_size);
2498 vl_api_send_msg (reg_prev, (u8 *) mp);
2505 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2506 ~0, client.client_index);
2511 if (reg_prev && vl_api_can_send_msg (reg_prev))
2513 vl_api_send_msg (reg_prev, (u8 *) mp);
2517 vl_msg_api_free (mp);
/*
 * Fan an ip6 neighbour-counters message out to registered clients
 * (copy/reg_prev pattern, identical in structure to the ip4 handler).
 */
2522 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2524 stats_main_t *sm = &stats_main;
2525 vl_api_registration_t *reg, *reg_prev = NULL;
2526 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2528 vpe_client_registration_t *clients, client;
2531 mp_size = sizeof (*mp_copy) +
2532 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2535 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2537 for (i = 0; i < vec_len (clients); i++)
2539 client = clients[i];
2540 reg = vl_api_client_index_to_registration (client.client_index);
2543 if (reg_prev && vl_api_can_send_msg (reg_prev))
2545 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2546 clib_memcpy (mp_copy, mp, mp_size);
2547 vl_api_send_msg (reg_prev, (u8 *) mp);
2554 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2555 ~0, client.client_index);
2560 if (reg_prev && vl_api_can_send_msg (reg_prev))
2562 vl_api_send_msg (reg_prev, (u8 *) mp);
2566 vl_msg_api_free (mp);
/*
 * Register (or deregister) the calling client for UDP-encap counter
 * streaming, then send a want_udp_encap_stats reply.
 */
2571 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2573 stats_main_t *sm = &stats_main;
2574 vpe_client_registration_t rp;
2575 vl_api_want_udp_encap_stats_reply_t *rmp;
2578 vl_api_registration_t *reg;
2581 fib = ~0; //Using same mechanism as _per_interface_
2582 rp.client_index = mp->client_index;
2583 rp.client_pid = mp->pid;
2585 handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2588 reg = vl_api_client_index_to_registration (mp->client_index);
/* client vanished between registration and reply: undo the registration */
2592 sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2593 fib, mp->client_index);
2597 rmp = vl_msg_api_alloc (sizeof (*rmp));
2598 rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2599 rmp->context = mp->context;
2600 rmp->retval = retval;
2602 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Legacy "want everything" registration: enables or disables the client
 * on all six classic counter categories (simple/combined per-interface,
 * ip4/ip6 FIB, ip4/ip6 neighbour) with item ~0 meaning "all", then
 * replies.
 */
2606 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2608 stats_main_t *sm = &stats_main;
2609 vpe_client_registration_t rp;
2610 vl_api_want_stats_reply_t *rmp;
2614 vl_api_registration_t *reg;
2616 item = ~0; //"ALL THE THINGS IN THE THINGS
2617 rp.client_index = mp->client_index;
2618 rp.client_pid = mp->pid;
2620 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2621 item, mp->enable_disable);
2623 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2624 item, mp->enable_disable);
2626 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2627 item, mp->enable_disable);
2629 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2630 item, mp->enable_disable);
2632 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2633 item, mp->enable_disable);
2635 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2636 item, mp->enable_disable);
2639 reg = vl_api_client_index_to_registration (mp->client_index);
2643 rmp = vl_msg_api_alloc (sizeof (*rmp));
2644 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2645 rmp->context = mp->context;
2646 rmp->retval = retval;
2648 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Register (or deregister) the calling client for per-interface simple
 * counters (swif ~0 == all interfaces), then reply.
 */
2652 vl_api_want_interface_simple_stats_t_handler
2653 (vl_api_want_interface_simple_stats_t * mp)
2655 stats_main_t *sm = &stats_main;
2656 vpe_client_registration_t rp;
2657 vl_api_want_interface_simple_stats_reply_t *rmp;
2661 vl_api_registration_t *reg;
2663 swif = ~0; //Using same mechanism as _per_interface_
2664 rp.client_index = mp->client_index;
2665 rp.client_pid = mp->pid;
2667 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2668 mp->enable_disable);
2671 reg = vl_api_client_index_to_registration (mp->client_index);
/* client vanished between registration and reply: undo the registration */
2676 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2681 rmp = vl_msg_api_alloc (sizeof (*rmp));
2682 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2683 rmp->context = mp->context;
2684 rmp->retval = retval;
2686 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Register (or deregister) the calling client for ip4 FIB counters
 * (fib ~0 == all tables), then reply.
 */
2691 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2693 stats_main_t *sm = &stats_main;
2694 vpe_client_registration_t rp;
2695 vl_api_want_ip4_fib_stats_reply_t *rmp;
2698 vl_api_registration_t *reg;
2701 fib = ~0; //Using same mechanism as _per_interface_
2702 rp.client_index = mp->client_index;
2703 rp.client_pid = mp->pid;
2705 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2706 mp->enable_disable);
2709 reg = vl_api_client_index_to_registration (mp->client_index);
2713 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2714 fib, mp->client_index);
2718 rmp = vl_msg_api_alloc (sizeof (*rmp));
2719 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2720 rmp->context = mp->context;
2721 rmp->retval = retval;
2723 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Register (or deregister) the calling client for ip4 multicast-FIB
 * counters (mfib ~0 == all tables), then reply.
 */
2727 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2729 stats_main_t *sm = &stats_main;
2730 vpe_client_registration_t rp;
2731 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2734 vl_api_registration_t *reg;
2737 mfib = ~0; //Using same mechanism as _per_interface_
2738 rp.client_index = mp->client_index;
2739 rp.client_pid = mp->pid;
2741 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2742 mp->enable_disable);
2745 reg = vl_api_client_index_to_registration (mp->client_index);
2748 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2749 mfib, mp->client_index);
2753 rmp = vl_msg_api_alloc (sizeof (*rmp));
2754 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2755 rmp->context = mp->context;
2756 rmp->retval = retval;
2758 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Register (or deregister) the calling client for ip6 FIB counters
 * (fib ~0 == all tables), then reply.
 */
2762 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2764 stats_main_t *sm = &stats_main;
2765 vpe_client_registration_t rp;
/* NOTE(review): rmp is declared with the ip4 reply type; it should be
 * vl_api_want_ip6_fib_stats_reply_t.  Presumably the two layouts match
 * today, but the type should be corrected — confirm against vpe.api. */
2766 vl_api_want_ip4_fib_stats_reply_t *rmp;
2769 vl_api_registration_t *reg;
2772 fib = ~0; //Using same mechanism as _per_interface_
2773 rp.client_index = mp->client_index;
2774 rp.client_pid = mp->pid;
2776 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2777 mp->enable_disable);
2780 reg = vl_api_client_index_to_registration (mp->client_index);
2783 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2784 fib, mp->client_index);
2788 rmp = vl_msg_api_alloc (sizeof (*rmp));
2789 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2790 rmp->context = mp->context;
2791 rmp->retval = retval;
2793 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Register (or deregister) the calling client for ip6 multicast-FIB
 * counters (mfib ~0 == all tables), then reply.
 */
2797 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2799 stats_main_t *sm = &stats_main;
2800 vpe_client_registration_t rp;
/* NOTE(review): rmp is declared with the ip4 mfib reply type; it should
 * be vl_api_want_ip6_mfib_stats_reply_t.  Presumably layout-compatible,
 * but the type should be corrected — confirm against vpe.api. */
2801 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2804 vl_api_registration_t *reg;
2807 mfib = ~0; //Using same mechanism as _per_interface_
2808 rp.client_index = mp->client_index;
2809 rp.client_pid = mp->pid;
2811 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2812 mp->enable_disable);
2815 reg = vl_api_client_index_to_registration (mp->client_index);
2818 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2819 mfib, mp->client_index);
2823 rmp = vl_msg_api_alloc (sizeof (*rmp));
2824 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2825 rmp->context = mp->context;
2826 rmp->retval = retval;
2828 vl_api_send_msg (reg, (u8 *) rmp);
2831 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
2833 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
2838 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
2843 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2845 stats_main_t *sm = &stats_main;
2846 vnet_interface_main_t *im = sm->interface_main;
2847 vl_api_vnet_get_summary_stats_reply_t *rmp;
2848 vlib_combined_counter_main_t *cm;
2851 u64 total_pkts[VLIB_N_RX_TX];
2852 u64 total_bytes[VLIB_N_RX_TX];
2853 vl_api_registration_t *reg;
2855 reg = vl_api_client_index_to_registration (mp->client_index);
2859 rmp = vl_msg_api_alloc (sizeof (*rmp));
2860 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2861 rmp->context = mp->context;
2864 memset (total_pkts, 0, sizeof (total_pkts));
2865 memset (total_bytes, 0, sizeof (total_bytes));
2867 vnet_interface_counter_lock (im);
2869 vec_foreach (cm, im->combined_sw_if_counters)
2871 which = cm - im->combined_sw_if_counters;
2873 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2875 vlib_get_combined_counter (cm, i, &v);
2876 total_pkts[which] += v.packets;
2877 total_bytes[which] += v.bytes;
2880 vnet_interface_counter_unlock (im);
2882 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2883 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2884 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2885 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
2887 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2889 vl_api_send_msg (reg, (u8 *) rmp);
2893 stats_memclnt_delete_callback (u32 client_index)
2895 vpe_client_stats_registration_t *rp;
2896 stats_main_t *sm = &stats_main;
2900 /* p = hash_get (sm->stats_registration_hash, client_index); */
2903 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2904 /* pool_put (sm->stats_registrations, rp); */
2905 /* hash_unset (sm->stats_registration_hash, client_index); */
/* The bulk counter messages below are produced locally and treated as
 * opaque payloads (stats_init also marks them "bounce" so the API infra
 * does not free them); stub out the generated endian/print handlers.
 * NOTE(review): clients are therefore assumed to handle any needed byte
 * swapping themselves -- confirm against the stats client code. */
#define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
#define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
#define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
#define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
#define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
#define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
2924 static clib_error_t *
2925 stats_init (vlib_main_t * vm)
2927 stats_main_t *sm = &stats_main;
2928 api_main_t *am = &api_main;
2929 void *vlib_worker_thread_bootstrap_fn (void *arg);
2932 sm->vnet_main = vnet_get_main ();
2933 sm->interface_main = &vnet_get_main ()->interface_main;
2935 sm->stats_poll_interval_in_seconds = 10;
2936 sm->data_structure_lock =
2937 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
2938 CLIB_CACHE_LINE_BYTES);
2939 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
2942 vl_msg_api_set_handlers(VL_API_##N, #n, \
2943 vl_api_##n##_t_handler, \
2945 vl_api_##n##_t_endian, \
2946 vl_api_##n##_t_print, \
2947 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
2951 /* tell the msg infra not to free these messages... */
2952 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
2953 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
2954 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
2955 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
2956 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
2957 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
2960 * Set up the (msg_name, crc, message-id) table
2962 setup_message_id_table (am);
2964 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
2965 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
2966 #define stats_reg(n) \
2967 sm->stats_registrations[IDX_##n] = 0; \
2968 sm->stats_registration_hash[IDX_##n] = 0;
2969 #include <vpp/stats/stats.reg>
2975 VLIB_INIT_FUNCTION (stats_init);
2978 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
2980 .function = stats_thread_fn,
2983 .no_data_structure_clone = 1,
2989 * fd.io coding-style-patch-verification: ON
2992 * eval: (c-set-style "gnu")