2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
25 stats_main_t stats_main;
27 #include <vnet/ip/ip.h>
29 #include <vpp/api/vpe_msg_enum.h>
32 #define f64_print(a,b)
34 #define vl_typedefs /* define message structures */
35 #include <vpp/api/vpe_all_api_h.h>
38 #define vl_endianfun /* define message structures */
39 #include <vpp/api/vpe_all_api_h.h>
42 /* instantiate all the print functions we know about */
43 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
45 #include <vpp/api/vpe_all_api_h.h>
48 #define foreach_stats_msg \
49 _(WANT_STATS, want_stats) \
50 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
51 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
52 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
53 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
54 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
55 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
56 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
57 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
58 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
59 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
60 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
61 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
62 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
63 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
64 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
65 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
66 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
67 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
68 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)
70 #define vl_msg_name_crc_list
71 #include <vpp/stats/stats.api.h>
72 #undef vl_msg_name_crc_list
75 setup_message_id_table (api_main_t * am)
78 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
79 foreach_vl_msg_name_crc_stats;
83 /* These constants ensure msg sizes <= 1024, aka ring allocation */
84 #define SIMPLE_COUNTER_BATCH_SIZE 126
85 #define COMBINED_COUNTER_BATCH_SIZE 63
86 #define IP4_FIB_COUNTER_BATCH_SIZE 48
87 #define IP6_FIB_COUNTER_BATCH_SIZE 30
88 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
89 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
90 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
93 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* Pretty-print a VNET_INTERFACE_COMBINED_COUNTERS message (packets+bytes
 * per sw_if_index) for CLI output.
 * NOTE(review): this extract is missing interior lines (local declarations,
 * braces, breaks, return) — comments describe only the visible logic. */
97 format_vnet_interface_combined_counters (u8 * s, va_list * args)
99 stats_main_t *sm = &stats_main;
100 vl_api_vnet_interface_combined_counters_t *mp =
101 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
104 u32 count, sw_if_index;
/* Message fields are in network byte order */
106 count = ntohl (mp->count);
107 sw_if_index = ntohl (mp->first_sw_if_index);
111 vp = (vlib_counter_t *) mp->data;
/* Map counter type to a printable name; "bogus" is the fallback */
113 switch (mp->vnet_counter_type)
115 case VNET_INTERFACE_COUNTER_RX:
118 case VNET_INTERFACE_COUNTER_TX:
122 counter_name = "bogus";
/* Counter payload may be unaligned in the message body, hence
 * clib_mem_unaligned before the byte-order swap */
125 for (i = 0; i < count; i++)
127 packets = clib_mem_unaligned (&vp->packets, u64);
128 packets = clib_net_to_host_u64 (packets);
129 bytes = clib_mem_unaligned (&vp->bytes, u64);
130 bytes = clib_net_to_host_u64 (bytes);
132 s = format (s, "%U.%s.packets %lld\n",
133 format_vnet_sw_if_index_name,
134 sm->vnet_main, sw_if_index, counter_name, packets);
135 s = format (s, "%U.%s.bytes %lld\n",
136 format_vnet_sw_if_index_name,
137 sm->vnet_main, sw_if_index, counter_name, bytes);
/* Pretty-print a VNET_INTERFACE_SIMPLE_COUNTERS message (single u64 per
 * sw_if_index) for CLI output.
 * NOTE(review): interior lines (breaks, braces, return) are missing from
 * this extract. */
144 format_vnet_interface_simple_counters (u8 * s, va_list * args)
146 stats_main_t *sm = &stats_main;
147 vl_api_vnet_interface_simple_counters_t *mp =
148 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
150 u32 count, sw_if_index;
151 count = ntohl (mp->count);
152 sw_if_index = ntohl (mp->first_sw_if_index);
154 vp = (u64 *) mp->data;
/* Translate the simple-counter type into a human-readable label */
157 switch (mp->vnet_counter_type)
159 case VNET_INTERFACE_COUNTER_DROP:
160 counter_name = "drop";
162 case VNET_INTERFACE_COUNTER_PUNT:
163 counter_name = "punt";
165 case VNET_INTERFACE_COUNTER_IP4:
166 counter_name = "ip4";
168 case VNET_INTERFACE_COUNTER_IP6:
169 counter_name = "ip6";
171 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
172 counter_name = "rx-no-buff";
174 case VNET_INTERFACE_COUNTER_RX_MISS:
175 counter_name = "rx-miss";
177 case VNET_INTERFACE_COUNTER_RX_ERROR:
178 counter_name = "rx-error (fifo-full)";
180 case VNET_INTERFACE_COUNTER_TX_ERROR:
181 counter_name = "tx-error (fifo-full)";
184 counter_name = "bogus";
/* Values are network byte order and possibly unaligned */
187 for (i = 0; i < count; i++)
189 v = clib_mem_unaligned (vp, u64);
190 v = clib_net_to_host_u64 (v);
192 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
193 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the stats data-structure spin lock.
 * Recursive acquisition by the same thread appears to be detected via
 * l->thread_index (interior handling lines are missing from this extract).
 * __sync_lock_test_and_set spins until the lock word is claimed. */
201 dslock (stats_main_t * sm, int release_hint, int tag)
204 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock not yet initialised — presumably an early-out; TODO confirm */
206 if (PREDICT_FALSE (l == 0))
209 thread_index = vlib_get_thread_index ();
210 if (l->lock && l->thread_index == thread_index)
219 while (__sync_lock_test_and_set (&l->lock, 1))
222 l->thread_index = thread_index;
/* Public wrapper: take the stats data-structure lock with a release hint
 * and a debug tag. */
227 stats_dslock_with_hint (int hint, int tag)
229 stats_main_t *sm = &stats_main;
230 dslock (sm, hint, tag);
/* Release the stats data-structure lock. Asserts the caller is the thread
 * that currently holds it; the barrier publishes writes before the lock
 * word is cleared (clearing line not visible in this extract). */
234 dsunlock (stats_main_t * sm)
237 data_structure_lock_t *l = sm->data_structure_lock;
239 if (PREDICT_FALSE (l == 0))
242 thread_index = vlib_get_thread_index ();
243 ASSERT (l->lock && l->thread_index == thread_index);
249 CLIB_MEMORY_BARRIER ();
/* Public wrapper: release the stats data-structure lock.
 * (The dsunlock(sm) call is missing from this extract.) */
255 stats_dsunlock (int hint, int tag)
257 stats_main_t *sm = &stats_main;
/* Look up a single client's registration for (reg, item).
 * Two-level lookup: item -> registration pool index, then
 * client_index -> client pool index. Returns the client record, or
 * (presumably) NULL on either miss — the early-return lines are not
 * visible in this extract. */
261 static vpe_client_registration_t *
262 get_client_for_stat (u32 reg, u32 item, u32 client_index)
264 stats_main_t *sm = &stats_main;
265 vpe_client_stats_registration_t *registration;
268 /* Is there anything listening for item in that reg */
269 p = hash_get (sm->stats_registration_hash[reg], item);
274 /* If there is, is our client_index one of them */
275 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
276 p = hash_get (registration->client_hash, client_index);
281 return pool_elt_at_index (registration->clients, p[0]);
/* Register a client for stat (reg, item). Creates the per-item
 * registration on first use, then adds the client to its pool and hash
 * (idempotence check via client_hash lookup). Returns 1 so the caller
 * enables the poller. */
286 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
288 stats_main_t *sm = &stats_main;
289 vpe_client_stats_registration_t *registration;
290 vpe_client_registration_t *cr;
293 /* Is there anything listening for item in that reg */
294 p = hash_get (sm->stats_registration_hash[reg], item);
/* First listener for this item: allocate and index the registration */
298 pool_get (sm->stats_registrations[reg], registration);
299 registration->item = item;
300 hash_set (sm->stats_registration_hash[reg], item,
301 registration - sm->stats_registrations[reg]);
305 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* Add this client if not already present */
308 p = hash_get (registration->client_hash, client->client_index);
312 pool_get (registration->clients, cr);
313 cr->client_index = client->client_index;
314 cr->client_pid = client->client_pid;
315 hash_set (registration->client_hash, cr->client_index,
316 cr - registration->clients);
319 return 1; //At least one client is doing something ... poll
/* De-register a client from stat (reg, item), garbage-collecting the
 * per-item registration when its last client leaves, and (apparently)
 * recomputing whether any registration remains at all — the elts result
 * is used past the visible lines; TODO confirm it gates the poller. */
323 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
325 stats_main_t *sm = &stats_main;
326 vpe_client_stats_registration_t *registration;
327 vpe_client_registration_t *client;
331 /* Clear the client first */
332 /* Is there anything listening for item in that reg */
333 p = hash_get (sm->stats_registration_hash[reg], item);
338 /* If there is, is our client_index one of them */
339 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
340 p = hash_get (registration->client_hash, client_index);
345 client = pool_elt_at_index (registration->clients, p[0]);
346 hash_unset (registration->client_hash, client->client_index);
347 pool_put (registration->clients, client);
349 /* Now check if that was the last client for that item */
350 if (0 == pool_elts (registration->clients))
352 hash_unset (sm->stats_registration_hash[reg], item);
353 pool_put (sm->stats_registrations[reg], registration);
358 /* Now check if that was the last item in any of the listened to stats */
359 for (i = 0; i < STATS_REG_N_IDX; i++)
361 elts += pool_elts (sm->stats_registrations[i]);
367 * Return a copy of the clients list.
/* Build and return a vector COPY of all clients registered for
 * (reg, item) — callers iterate the copy so the live pool can mutate.
 * Caller owns the returned vector; presumably vec_free'd by caller. */
369 vpe_client_registration_t *
370 get_clients_for_stat (u32 reg, u32 item)
372 stats_main_t *sm = &stats_main;
373 vpe_client_registration_t *client, *clients = 0;
374 vpe_client_stats_registration_t *registration;
377 /* Is there anything listening for item in that reg */
378 p = hash_get (sm->stats_registration_hash[reg], item);
383 /* If there is, is our client_index one of them */
384 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
386 vec_reset_length (clients);
/* Deep-copy each registered client record into the result vector */
389 pool_foreach (client, registration->clients,
391 vec_add1 (clients, *client);}
/* Free the per-stat index vectors (one per entry in stats.reg) and then
 * the enclosing registrations vector itself. */
399 clear_client_reg (u32 ** registrations)
401 /* When registrations[x] is a vector of pool indices
402 here is a good place to clean up the pools
404 #define stats_reg(n) vec_free(registrations[IDX_##n]);
405 #include <vpp/stats/stats.reg>
408 vec_free (registrations);
/* Allocate/initialise the per-client registration table: one entry per
 * stat type listed in stats.reg, each holding a (reset) vector of
 * interested indices. Returns the (possibly reallocated) table. */
412 init_client_reg (u32 ** registrations)
416 Initialise the stats registrations for each
417 type of stat a client can register for as well as
418 a vector of "interested" indexes.
419 Initially this is a u32 of either sw_if_index or fib_index
420 but eventually this should migrate to a pool_index (u32)
421 with a type specific pool that can include more complex things
422 such as timing and structured events.
424 vec_validate (registrations, STATS_REG_N_IDX);
425 #define stats_reg(n) \
426 vec_reset_length(registrations[IDX_##n]);
427 #include <vpp/stats/stats.reg>
431 When registrations[x] is a vector of pool indices, here
432 is a good place to init the pools.
434 return registrations;
/* Enable every known stat for a client by appending the ~0 "all items"
 * wildcard to each per-stat index vector. Returns the table. */
438 enable_all_client_reg (u32 ** registrations)
442 Enable all stats known by adding
443 ~0 to the index vector. Eventually this
444 should be deprecated.
446 #define stats_reg(n) \
447 vec_add1(registrations[IDX_##n], ~0);
448 #include <vpp/stats/stats.reg>
450 return registrations;
/* Collect ALL simple (single-u64) interface counters and ship them to the
 * main thread in SIMPLE_COUNTER_BATCH_SIZE-sized API messages, so each
 * message fits the <=1024-byte ring allocation.
 * NOTE(review): batching/send boundaries are partially missing from this
 * extract. */
454 do_simple_interface_counters (stats_main_t * sm)
456 vl_api_vnet_interface_simple_counters_t *mp = 0;
457 vnet_interface_main_t *im = sm->interface_main;
458 api_main_t *am = sm->api_main;
459 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
460 svm_queue_t *q = shmem_hdr->vl_input_queue;
461 vlib_simple_counter_main_t *cm;
462 u32 items_this_message = 0;
467 * Prevent interface registration from expanding / moving the vectors...
468 * That tends never to happen, so we can hold this lock for a while.
470 vnet_interface_counter_lock (im);
/* One pass per simple-counter type, batching across sw_if_indices */
472 vec_foreach (cm, im->sw_if_counters)
474 n_counts = vlib_simple_counter_n_counters (cm);
475 for (i = 0; i < n_counts; i++)
479 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
482 mp = vl_msg_api_alloc_as_if_client
483 (sizeof (*mp) + items_this_message * sizeof (v));
484 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
/* Counter type is the offset of cm within the sw_if_counters vector */
485 mp->vnet_counter_type = cm - im->sw_if_counters;
486 mp->first_sw_if_index = htonl (i);
488 vp = (u64 *) mp->data;
490 v = vlib_get_simple_counter (cm, i);
491 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
494 if (mp->count == items_this_message)
496 mp->count = htonl (items_this_message);
497 /* Send to the main thread... */
498 vl_msg_api_send_shmem (q, (u8 *) & mp);
504 vnet_interface_counter_unlock (im);
/* Enable or disable one client's registration for (stat, item).
 * On disable: warn if the client was never registered, else clear it.
 * On enable: record index/pid and turn on the poller if set_client_for_stat
 * says someone is now listening. */
508 handle_client_registration (vpe_client_registration_t * client, u32 stat,
509 u32 item, int enable_disable)
511 stats_main_t *sm = &stats_main;
512 vpe_client_registration_t *rp, _rp;
514 rp = get_client_for_stat (stat, item, client->client_index);
517 if (enable_disable == 0)
519 if (!rp) // No client to disable
521 clib_warning ("pid %d: already disabled for stats...",
526 clear_client_for_stat (stat, item, client->client_index);
/* Enable path: (re)populate the registration and arm the poller */
533 rp->client_index = client->client_index;
534 rp->client_pid = client->client_pid;
535 sm->enable_poller = set_client_for_stat (stat, item, rp);
540 /**********************************
541 * ALL Interface Combined stats - to be deprecated
542 **********************************/
545 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/* API handler: legacy "want all combined interface stats" request.
 * Implemented on top of the per-interface mechanism using swif = ~0 as
 * the "all interfaces" wildcard. Replies with a retval message.
 * NOTE(review): the reg==NULL cleanup path is only partially visible. */
548 vl_api_want_interface_combined_stats_t_handler
549 (vl_api_want_interface_combined_stats_t * mp)
551 stats_main_t *sm = &stats_main;
552 vpe_client_registration_t rp;
553 vl_api_want_interface_combined_stats_reply_t *rmp;
556 vl_api_registration_t *reg;
559 swif = ~0; //Using same mechanism as _per_interface_
560 rp.client_index = mp->client_index;
561 rp.client_pid = mp->pid;
563 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
567 reg = vl_api_client_index_to_registration (mp->client_index);
/* Client vanished: undo the registration so the poller doesn't spin */
571 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
576 rmp = vl_msg_api_alloc (sizeof (*rmp));
577 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
578 rmp->context = mp->context;
579 rmp->retval = retval;
581 vl_api_send_msg (reg, (u8 *) rmp);
/* Fan a combined-counters message out to every registered client.
 * Pattern: send a fresh copy to all clients but the last, then hand the
 * original mp to the final client (or free it if nobody took it) —
 * avoids one needless copy. reg_prev trails one client behind. */
585 vl_api_vnet_interface_combined_counters_t_handler
586 (vl_api_vnet_interface_combined_counters_t * mp)
588 vpe_client_registration_t *clients, client;
589 stats_main_t *sm = &stats_main;
590 vl_api_registration_t *reg, *reg_prev = NULL;
591 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
/* Total wire size: header plus count trailing vlib_counter_t entries */
595 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
598 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
599 ~0 /*flag for all */ );
601 for (i = 0; i < vec_len (clients); i++)
604 reg = vl_api_client_index_to_registration (client.client_index);
607 if (reg_prev && vl_api_can_send_msg (reg_prev))
609 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
610 clib_memcpy (mp_copy, mp, mp_size);
611 vl_api_send_msg (reg_prev, (u8 *) mp);
/* Debug path: dump the message to stdout — TODO confirm gating flag */
619 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
622 if (reg_prev && vl_api_can_send_msg (reg_prev))
624 vl_api_send_msg (reg_prev, (u8 *) mp);
/* Nobody consumed the original message: free it */
628 vl_msg_api_free (mp);
/* Collect ALL combined (packets+bytes) interface counters and ship them
 * to the main thread in COMBINED_COUNTER_BATCH_SIZE batches (messages
 * sized <= 1024 bytes for the ring). Mirrors
 * do_simple_interface_counters for vlib_counter_t payloads. */
633 do_combined_interface_counters (stats_main_t * sm)
635 vl_api_vnet_interface_combined_counters_t *mp = 0;
636 vnet_interface_main_t *im = sm->interface_main;
637 api_main_t *am = sm->api_main;
638 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
639 svm_queue_t *q = shmem_hdr->vl_input_queue;
640 vlib_combined_counter_main_t *cm;
641 u32 items_this_message = 0;
642 vlib_counter_t v, *vp = 0;
/* Hold the counter lock so interface creation can't move the vectors */
645 vnet_interface_counter_lock (im);
647 vec_foreach (cm, im->combined_sw_if_counters)
649 n_counts = vlib_combined_counter_n_counters (cm);
650 for (i = 0; i < n_counts; i++)
654 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
657 mp = vl_msg_api_alloc_as_if_client
658 (sizeof (*mp) + items_this_message * sizeof (v));
659 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
660 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
661 mp->first_sw_if_index = htonl (i);
663 vp = (vlib_counter_t *) mp->data;
665 vlib_get_combined_counter (cm, i, &v);
/* Store host->net swapped values; destination may be unaligned */
666 clib_mem_unaligned (&vp->packets, u64)
667 = clib_host_to_net_u64 (v.packets);
668 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
671 if (mp->count == items_this_message)
673 mp->count = htonl (items_this_message);
674 /* Send to the main thread... */
675 vl_msg_api_send_shmem (q, (u8 *) & mp);
681 vnet_interface_counter_unlock (im);
684 /**********************************
685 * Per Interface Combined stats
686 **********************************/
688 /* Request from client registering interfaces it wants */
/* API handler: client registers for combined stats on a list of specific
 * sw_if_indexes. Validates every index first (all-or-nothing), then
 * registers each; replies with retval.
 * NOTE(review): the ~0 wildcard skip inside the validation loop is not
 * visible in this extract — TODO confirm. */
690 vl_api_want_per_interface_combined_stats_t_handler
691 (vl_api_want_per_interface_combined_stats_t * mp)
693 stats_main_t *sm = &stats_main;
694 vpe_client_registration_t rp;
695 vl_api_want_per_interface_combined_stats_reply_t *rmp;
696 vlib_combined_counter_main_t *cm;
699 vl_api_registration_t *reg;
700 u32 i, swif, num = 0;
702 num = ntohl (mp->num);
705 * Validate sw_if_indexes before registering
707 for (i = 0; i < num; i++)
709 swif = ntohl (mp->sw_ifs[i]);
712 * Check its a real sw_if_index that the client is allowed to see
716 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
718 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indexes valid: perform the actual (de)registrations */
724 for (i = 0; i < num; i++)
726 swif = ntohl (mp->sw_ifs[i]);
728 rp.client_index = mp->client_index;
729 rp.client_pid = mp->pid;
730 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
731 swif, ntohl (mp->enable_disable));
735 reg = vl_api_client_index_to_registration (mp->client_index);
/* Client disconnected mid-request: roll back every registration */
738 for (i = 0; i < num; i++)
740 swif = ntohl (mp->sw_ifs[i]);
743 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
749 rmp = vl_msg_api_alloc (sizeof (*rmp));
750 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
751 rmp->context = mp->context;
752 rmp->retval = retval;
754 vl_api_send_msg (reg, (u8 *) rmp);
757 /* Per Interface Combined distribution to client */
/* Poller body: for each per-interface combined-stats registration, build
 * and send one message per (registration, client) pair containing all
 * eight RX/TX counter flavours for that sw_if_index. The ~0 item falls
 * back to the all-interfaces path (do_combined_interface_counters).
 * Registrations and clients are snapshotted into regs_tmp/clients_tmp so
 * pools can be mutated (stale-client cleanup) while iterating. */
759 do_combined_per_interface_counters (stats_main_t * sm)
761 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
762 vnet_interface_main_t *im = sm->interface_main;
763 api_main_t *am = sm->api_main;
764 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
765 vl_api_registration_t *vl_reg;
766 vlib_combined_counter_main_t *cm;
767 vl_api_vnet_combined_counter_t *vp = 0;
770 vpe_client_stats_registration_t *reg;
771 vpe_client_registration_t *client;
772 u32 *sw_if_index = 0;
774 vnet_interface_counter_lock (im);
/* Snapshot registrations so the pool can change under us */
776 vec_reset_length (sm->regs_tmp);
780 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
781 ({ vec_add1 (sm->regs_tmp, reg); }));
784 for (i = 0; i < vec_len (sm->regs_tmp); i++)
786 reg = sm->regs_tmp[i];
/* ~0 wildcard: drop the lock and run the all-interfaces collector */
789 vnet_interface_counter_unlock (im);
790 do_combined_interface_counters (sm);
791 vnet_interface_counter_lock (im);
794 vec_reset_length (sm->clients_tmp);
797 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
801 for (j = 0; j < vec_len (sm->clients_tmp); j++)
803 client = sm->clients_tmp[j];
805 vl_reg = vl_api_client_index_to_registration (client->client_index);
807 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
811 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
812 reg->item, client->client_index);
815 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
816 memset (mp, 0, sizeof (*mp));
819 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
822 * count will eventually be used to optimise the batching
823 * of per client messages for each stat. For now setting this to 1 then
824 * iterate. This will not affect API.
826 * FIXME instead of enqueueing here, this should be sent to a batch
827 * storer for per-client transmission. Each "mp" sent would be a single entry
828 * and if a client is listening to other sw_if_indexes for same, it would be
829 * appended to that *mp
833 * - capturing the timestamp of the counters "when VPP knew them" is important.
834 * Less so is that the timing of the delivery to the control plane be in the same
837 * i.e. As long as the control plane can delta messages from VPP and work out
838 * velocity etc based on the timestamp, it can do so in a more "batch mode".
840 * It would be beneficial to keep a "per-client" message queue, and then
841 * batch all the stat messages for a client into one message, with
842 * discrete timestamps.
844 * Given this particular API is for "per interface" one assumes that the scale
845 * is less than the ~0 case, which the prior API is suited for.
849 * 1 message per api call for now
851 mp->count = htonl (1);
852 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
854 vp = (vl_api_vnet_combined_counter_t *) mp->data;
855 vp->sw_if_index = htonl (reg->item);
857 im = &vnet_get_main ()->interface_main;
/* Macro: fetch combined counter X for this interface and store its
 * packets/bytes (network order, unaligned-safe) into field pair x */
860 cm = im->combined_sw_if_counters + X; \
861 vlib_get_combined_counter (cm, reg->item, &v); \
862 clib_mem_unaligned (&vp->x##_packets, u64) = \
863 clib_host_to_net_u64 (v.packets); \
864 clib_mem_unaligned (&vp->x##_bytes, u64) = \
865 clib_host_to_net_u64 (v.bytes);
868 _(VNET_INTERFACE_COUNTER_RX, rx);
869 _(VNET_INTERFACE_COUNTER_TX, tx);
870 _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
871 _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
872 _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
873 _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
874 _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
875 _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
879 vl_api_send_msg (vl_reg, (u8 *) mp);
883 vnet_interface_counter_unlock (im);
886 /**********************************
887 * Per Interface simple stats
888 **********************************/
890 /* Request from client registering interfaces it wants */
/* API handler: client registers for simple stats on a list of specific
 * sw_if_indexes. Same shape as the combined-stats variant: validate all
 * indexes, register each, roll back on client disconnect, reply. */
892 vl_api_want_per_interface_simple_stats_t_handler
893 (vl_api_want_per_interface_simple_stats_t * mp)
895 stats_main_t *sm = &stats_main;
896 vpe_client_registration_t rp;
897 vl_api_want_per_interface_simple_stats_reply_t *rmp;
898 vlib_simple_counter_main_t *cm;
901 vl_api_registration_t *reg;
902 u32 i, swif, num = 0;
904 num = ntohl (mp->num);
906 for (i = 0; i < num; i++)
908 swif = ntohl (mp->sw_ifs[i]);
910 /* Check its a real sw_if_index that the client is allowed to see */
913 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
915 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indexes valid: perform the actual (de)registrations */
921 for (i = 0; i < num; i++)
923 swif = ntohl (mp->sw_ifs[i]);
925 rp.client_index = mp->client_index;
926 rp.client_pid = mp->pid;
927 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
928 swif, ntohl (mp->enable_disable));
932 reg = vl_api_client_index_to_registration (mp->client_index);
934 /* Client may have disconnected abruptly, clean up */
937 for (i = 0; i < num; i++)
939 swif = ntohl (mp->sw_ifs[i]);
941 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
949 rmp = vl_msg_api_alloc (sizeof (*rmp));
950 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
951 rmp->context = mp->context;
952 rmp->retval = retval;
954 vl_api_send_msg (reg, (u8 *) rmp);
957 /* Per Interface Simple distribution to client */
/* Poller body: for each per-interface simple-stats registration, send one
 * message per (registration, client) with drop/punt/ip4/ip6/rx-no-buf/
 * rx-miss/rx-error/tx-error/mpls counters for that sw_if_index. The ~0
 * item falls back to do_simple_interface_counters. Registrations and
 * clients are snapshotted so pools may be mutated during cleanup. */
959 do_simple_per_interface_counters (stats_main_t * sm)
961 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
962 vnet_interface_main_t *im = sm->interface_main;
963 api_main_t *am = sm->api_main;
964 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
965 vl_api_registration_t *vl_reg;
966 vlib_simple_counter_main_t *cm;
968 vpe_client_stats_registration_t *reg;
969 vpe_client_registration_t *client;
970 u32 timestamp, count;
971 vl_api_vnet_simple_counter_t *vp = 0;
974 vnet_interface_counter_lock (im);
/* Snapshot registrations so the pool can change under us */
976 vec_reset_length (sm->regs_tmp);
980 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
981 ({ vec_add1 (sm->regs_tmp, reg); }));
984 for (i = 0; i < vec_len (sm->regs_tmp); i++)
986 reg = sm->regs_tmp[i];
/* ~0 wildcard: drop the lock and run the all-interfaces collector */
989 vnet_interface_counter_unlock (im);
990 do_simple_interface_counters (sm);
991 vnet_interface_counter_lock (im);
994 vec_reset_length (sm->clients_tmp);
997 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
1001 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1003 client = sm->clients_tmp[j];
1004 vl_reg = vl_api_client_index_to_registration (client->client_index);
1006 /* Client may have disconnected abrubtly, clean up */
1010 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1011 reg->item, client->client_index);
1015 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1016 memset (mp, 0, sizeof (*mp));
1017 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1020 * count will eventually be used to optimise the batching
1021 * of per client messages for each stat. For now setting this to 1 then
1022 * iterate. This will not affect API.
1024 * FIXME instead of enqueueing here, this should be sent to a batch
1025 * storer for per-client transmission. Each "mp" sent would be a single entry
1026 * and if a client is listening to other sw_if_indexes for same, it would be
1027 * appended to that *mp
1031 * - capturing the timestamp of the counters "when VPP knew them" is important.
1032 * Less so is that the timing of the delivery to the control plane be in the same
1035 * i.e. As long as the control plane can delta messages from VPP and work out
1036 * velocity etc based on the timestamp, it can do so in a more "batch mode".
1038 * It would be beneficial to keep a "per-client" message queue, and then
1039 * batch all the stat messages for a client into one message, with
1040 * discrete timestamps.
1042 * Given this particular API is for "per interface" one assumes that the scale
1043 * is less than the ~0 case, which the prior API is suited for.
1047 * 1 message per api call for now
1049 mp->count = htonl (1);
1050 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1051 vp = (vl_api_vnet_simple_counter_t *) mp->data;
1053 vp->sw_if_index = htonl (reg->item);
/* One fetch per counter type; values stored network order, unaligned-safe */
1055 // VNET_INTERFACE_COUNTER_DROP
1056 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1057 v = vlib_get_simple_counter (cm, reg->item);
1058 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1060 // VNET_INTERFACE_COUNTER_PUNT
1061 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1062 v = vlib_get_simple_counter (cm, reg->item);
1063 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1065 // VNET_INTERFACE_COUNTER_IP4
1066 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1067 v = vlib_get_simple_counter (cm, reg->item);
1068 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1070 //VNET_INTERFACE_COUNTER_IP6
1071 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1072 v = vlib_get_simple_counter (cm, reg->item);
1073 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1075 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1076 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1077 v = vlib_get_simple_counter (cm, reg->item);
1078 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1079 clib_host_to_net_u64 (v);
1081 //VNET_INTERFACE_COUNTER_RX_MISS
1082 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1083 v = vlib_get_simple_counter (cm, reg->item);
1084 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1086 //VNET_INTERFACE_COUNTER_RX_ERROR
1087 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1088 v = vlib_get_simple_counter (cm, reg->item);
1089 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1091 //VNET_INTERFACE_COUNTER_TX_ERROR
1092 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1093 v = vlib_get_simple_counter (cm, reg->item);
1094 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1096 //VNET_INTERFACE_COUNTER_MPLS
1097 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1098 v = vlib_get_simple_counter (cm, reg->item);
1099 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1101 vl_api_send_msg (vl_reg, (u8 *) mp);
1105 vnet_interface_counter_unlock (im);
1108 /**********************************
1110 **********************************/
/* Sleep for sec seconds + nsec nanoseconds via nanosleep. Used to yield
 * between FIB/NBR stat batches when the shared-memory queue fills.
 * NOTE(review): the retry-on-EINTR loop around nanosleep is only
 * partially visible in this extract. */
1113 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1115 struct timespec _req, *req = &_req;
1116 struct timespec _rem, *rem = &_rem;
1119 req->tv_nsec = nsec;
1122 if (nanosleep (req, rem) == 0)
/* Interrupted or failed: report anything other than a clean return */
1127 clib_unix_warning ("nanosleep");
1133 * @brief The context passed when collecting adjacency counters
/* Walk context for collecting IPv4 neighbour adjacency counters on one
 * interface: the owning sw_if_index plus an accumulator vector. */
1135 typedef struct ip4_nbr_stats_ctx_t_
1138 * The SW IF index all these adjs belong to
1143 * A vector of ip4 nbr counters
1145 vl_api_ip4_nbr_counter_t *counters;
1146 } ip4_nbr_stats_ctx_t;
/* adj_nbr_walk callback: append this adjacency's combined counter to the
 * context vector (host->net swapped), skipping zero-packet adjacencies.
 * Always continues the walk. */
1148 static adj_walk_rc_t
1149 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1151 vl_api_ip4_nbr_counter_t *vl_counter;
1152 vlib_counter_t adj_counter;
1153 ip4_nbr_stats_ctx_t *ctx;
1154 ip_adjacency_t *adj;
1157 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Only ship adjacencies that have actually seen traffic */
1159 if (0 != adj_counter.packets)
1161 vec_add2 (ctx->counters, vl_counter, 1);
1164 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1165 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
/* IPv4 next-hop address copied as-is (already network byte order) */
1166 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1167 vl_counter->link_type = adj->ia_link;
1169 return (ADJ_WALK_RC_CONTINUE);
1172 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain the collected IPv4 neighbour counters to the main thread in
 * IP4_FIB_COUNTER_BATCH_SIZE chunks. Counters are copied from the BACK
 * of the vector so "consumed" entries are erased by shrinking the length.
 * If the shm queue is full, sleep STATS_RELEASE_DELAY_NS and resume —
 * any remaining entries in ctx survive the suspend. */
1175 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1177 api_main_t *am = sm->api_main;
1178 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1179 svm_queue_t *q = shmem_hdr->vl_input_queue;
1180 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1184 * If the walk context has counters, which may be left over from the last
1185 * suspend, then we continue from there.
1187 while (0 != vec_len (ctx->counters))
1189 u32 n_items = MIN (vec_len (ctx->counters),
1190 IP4_FIB_COUNTER_BATCH_SIZE);
1193 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1195 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1198 (vl_api_ip4_nbr_counter_t)));
1199 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1200 mp->count = ntohl (n_items);
1201 mp->sw_if_index = ntohl (ctx->sw_if_index);
1206 * copy the counters from the back of the context, then we can easily
1207 * 'erase' them by resetting the vector length.
1208 * The order we push the stats to the caller is not important.
1211 &ctx->counters[vec_len (ctx->counters) - n_items],
1212 n_items * sizeof (*ctx->counters));
1214 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Check fullness before sending so we know whether to back off */
1220 pause = svm_queue_is_full (q);
1222 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1223 svm_queue_unlock (q);
1227 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1228 STATS_RELEASE_DELAY_NS);
/* Top-level IPv4 neighbour-counter collection: for every sw interface,
 * walk its neighbour adjacencies under the stats lock (so the walk is
 * safe against routing updates), then ship whatever was collected. */
1233 do_ip4_nbr_counters (stats_main_t * sm)
1235 vnet_main_t *vnm = vnet_get_main ();
1236 vnet_interface_main_t *im = &vnm->interface_main;
1237 vnet_sw_interface_t *si;
1239 ip4_nbr_stats_ctx_t ctx = {
1245 pool_foreach (si, im->sw_interfaces,
1248 * update the interface we are now concerned with
1250 ctx.sw_if_index = si->sw_if_index;
1253 * we are about to walk another interface, so we shouldn't have any pending
1256 ASSERT(ctx.counters == NULL);
1259 * visit each neighbour adjacency on the interface and collect
1260 * its current stats.
1261 * Because we hold the lock the walk is synchronous, so safe to routing
1262 * updates. It's limited in work by the number of adjacenies on an
1263 * interface, which is typically not huge.
1265 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1266 adj_nbr_walk (si->sw_if_index,
1273 * if this interface has some adjacencies with counters then ship them,
1274 * else continue to the next interface.
1276 if (NULL != ctx.counters)
1278 ip4_nbr_ship(sm, &ctx);
1285 * @brief The context passed when collecting adjacency counters
/* Walk context for collecting IPv6 neighbour adjacency counters on one
 * interface: the owning sw_if_index plus an accumulator vector. */
1287 typedef struct ip6_nbr_stats_ctx_t_
1290 * The SW IF index all these adjs belong to
1295 * A vector of ip6 nbr counters
1297 vl_api_ip6_nbr_counter_t *counters;
1298 } ip6_nbr_stats_ctx_t;
/* adj_nbr_walk callback (IPv6 flavour of ip4_nbr_stats_cb): append this
 * adjacency's combined counter to the context vector, skipping
 * zero-packet adjacencies. Always continues the walk. */
1300 static adj_walk_rc_t
1301 ip6_nbr_stats_cb (adj_index_t ai,
1304 vl_api_ip6_nbr_counter_t *vl_counter;
1305 vlib_counter_t adj_counter;
1306 ip6_nbr_stats_ctx_t *ctx;
1307 ip_adjacency_t *adj;
1310 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* Only ship adjacencies that have actually seen traffic */
1312 if (0 != adj_counter.packets)
1314 vec_add2(ctx->counters, vl_counter, 1);
1317 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1318 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
/* 128-bit IPv6 next-hop copied as two u64 halves, byte order as stored */
1319 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1320 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1321 vl_counter->link_type = adj->ia_link;
1323 return (ADJ_WALK_RC_CONTINUE);
1326 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain collected IPv6 neighbour counters to the main thread in
 * IP6_FIB_COUNTER_BATCH_SIZE chunks; mirrors ip4_nbr_ship. Entries are
 * copied from the back of the vector and erased by shrinking its length;
 * when the shm queue is full, back off STATS_RELEASE_DELAY_NS. */
1329 ip6_nbr_ship (stats_main_t * sm,
1330 ip6_nbr_stats_ctx_t *ctx)
1332 api_main_t *am = sm->api_main;
1333 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1334 svm_queue_t *q = shmem_hdr->vl_input_queue;
1335 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1339 * If the walk context has counters, which may be left over from the last
1340 * suspend, then we continue from there.
1342 while (0 != vec_len(ctx->counters))
1344 u32 n_items = MIN (vec_len (ctx->counters),
1345 IP6_FIB_COUNTER_BATCH_SIZE);
1348 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1350 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1353 (vl_api_ip6_nbr_counter_t)));
1354 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1355 mp->count = ntohl (n_items);
1356 mp->sw_if_index = ntohl (ctx->sw_if_index);
1361 * copy the counters from the back of the context, then we can easily
1362 * 'erase' them by resetting the vector length.
1363 * The order we push the stats to the caller is not important.
1366 &ctx->counters[vec_len (ctx->counters) - n_items],
1367 n_items * sizeof (*ctx->counters));
1369 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Check fullness before sending so we know whether to back off */
1375 pause = svm_queue_is_full (q);
1377 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1378 svm_queue_unlock (q);
1382 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1383 STATS_RELEASE_DELAY_NS);
/*
 * Collect and ship ip6 neighbour-adjacency counters for every software
 * interface: for each interface, walk its neighbour adjacencies under
 * the data-structure lock (via ip6_nbr_stats_cb) and then ship whatever
 * was collected. NOTE(review): extract is elided — braces, the ctx
 * initializer body and the dsunlock call are missing from this view.
 */
1388 do_ip6_nbr_counters (stats_main_t * sm)
1390 vnet_main_t *vnm = vnet_get_main ();
1391 vnet_interface_main_t *im = &vnm->interface_main;
1392 vnet_sw_interface_t *si;
1394 ip6_nbr_stats_ctx_t ctx = {
1400 pool_foreach (si, im->sw_interfaces,
1403 * update the interface we are now concerned with
1405 ctx.sw_if_index = si->sw_if_index;
1408 * we are about to walk another interface, so we shouldn't have any pending
/* ip6_nbr_ship must have fully drained the vector on the previous pass */
1411 ASSERT(ctx.counters == NULL);
1414 * visit each neighbour adjacency on the interface and collect
1415 * its current stats.
1416 * Because we hold the lock the walk is synchronous, so safe to routing
1417 * updates. It's limited in work by the number of adjacenies on an
1418 * interface, which is typically not huge.
1420 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1421 adj_nbr_walk (si->sw_if_index,
1428 * if this interface has some adjacencies with counters then ship them,
1429 * else continue to the next interface.
1431 if (NULL != ctx.counters)
1433 ip6_nbr_ship(sm, &ctx);
/*
 * Collect per-route combined counters for every ip4 FIB and ship them
 * to registered clients in IP4_FIB_COUNTER_BATCH_SIZE batches.
 * Cooperates with the control plane: whenever the data-structure lock's
 * release_hint is set, it drops the lock, sleeps, and restarts the
 * current FIB (start_at_fib_index).
 * NOTE(review): extract is heavily elided — braces, several locals
 * (i, j, k, x, p, r, c, v4_fib, hash iteration details), dsunlock
 * calls and some control-flow lines are missing from this view.
 */
1440 do_ip4_fib_counters (stats_main_t * sm)
1442 ip4_main_t *im4 = &ip4_main;
1443 api_main_t *am = sm->api_main;
1444 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1445 svm_queue_t *q = shmem_hdr->vl_input_queue;
1449 do_ip46_fibs_t *do_fibs;
1450 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1451 u32 items_this_message;
1452 vl_api_ip4_fib_counter_t *ctrp = 0;
1453 u32 start_at_fib_index = 0;
1456 do_fibs = &sm->do_ip46_fibs;
/* snapshot the FIB pool into a plain vector so we can iterate by index */
1459 vec_reset_length (do_fibs->fibs);
1461 pool_foreach (fib, im4->fibs,
1462 ({vec_add1(do_fibs->fibs,fib);}));
1466 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1468 fib = do_fibs->fibs[j];
1469 /* We may have bailed out due to control-plane activity */
1470 while ((fib - im4->fibs) < start_at_fib_index)
1473 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1477 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1478 mp = vl_msg_api_alloc_as_if_client
1480 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1481 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1483 mp->vrf_id = ntohl (fib->ft_table_id);
1484 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1488 /* happens if the last FIB was empty... */
1489 ASSERT (mp->count == 0);
1490 mp->vrf_id = ntohl (fib->ft_table_id);
1493 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1495 vec_reset_length (do_fibs->ip4routes);
1496 vec_reset_length (do_fibs->results);
/* walk each per-prefix-length hash table and flatten it into ip4routes */
1498 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1500 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1504 vec_reset_length (do_fibs->pvec);
1506 x.address_length = i;
1508 hash_foreach_pair (p, hash, (
1510 vec_add1 (do_fibs->pvec, p);}
1512 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1514 p = do_fibs->pvec[k];
1515 x.address.data_u32 = p->key;
1516 x.index = p->value[0];
1518 vec_add1 (do_fibs->ip4routes, x);
/* control plane wants the lock: remember where we were and back off */
1519 if (sm->data_structure_lock->release_hint)
1521 start_at_fib_index = fib - im4->fibs;
1523 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1524 STATS_RELEASE_DELAY_NS);
1526 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1532 vec_foreach (r, do_fibs->ip4routes)
1535 const dpo_id_t *dpo_id;
1538 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1539 index = (u32) dpo_id->dpoi_index;
1541 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1544 * If it has actually
1545 * seen at least one packet, send it.
1550 /* already in net byte order */
1551 ctrp->address = r->address.as_u32;
1552 ctrp->address_length = r->address_length;
1553 ctrp->packets = clib_host_to_net_u64 (c.packets);
1554 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: ship it and start a fresh message for the same vrf */
1558 if (mp->count == items_this_message)
1560 mp->count = htonl (items_this_message);
1562 * If the main thread's input queue is stuffed,
1563 * drop the data structure lock (which the main thread
1564 * may want), and take a pause.
1567 if (svm_queue_is_full (q))
1570 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1571 svm_queue_unlock (q);
1573 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1574 STATS_RELEASE_DELAY_NS);
1577 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1578 svm_queue_unlock (q);
1580 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1581 mp = vl_msg_api_alloc_as_if_client
1583 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1584 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1586 mp->vrf_id = ntohl (fib->ft_table_id);
1587 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1589 } /* for each (mp or single) adj */
1590 if (sm->data_structure_lock->release_hint)
1592 start_at_fib_index = fib - im4->fibs;
1594 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1596 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1599 } /* vec_foreach (routes) */
1603 /* Flush any data from this fib */
1606 mp->count = htonl (mp->count);
1607 vl_msg_api_send_shmem (q, (u8 *) & mp);
1612 /* If e.g. the last FIB had no reportable routes, free the buffer */
1614 vl_msg_api_free (mp);
/*
 * mfib table-walk callback: record the prefix of each mfib entry in
 * sm->do_ip46_fibs.mroutes for later counter collection. The entries
 * are re-looked-up afterwards because the collector may suspend.
 * NOTE(review): return type line and braces are elided in this extract.
 */
1618 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1620 stats_main_t *sm = ctx;
1621 do_ip46_fibs_t *do_fibs;
1622 mfib_entry_t *entry;
1624 do_fibs = &sm->do_ip46_fibs;
1625 entry = mfib_entry_get (fei);
1627 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/*
 * Collect per-(S,G) replicate counters for every ip4 mfib and ship them
 * in IP4_MFIB_COUNTER_BATCH_SIZE batches. The table is walked with
 * updates blocked to build a prefix list, then each prefix is
 * re-looked-up (the collector may suspend between batches).
 * NOTE(review): extract is elided — braces, several locals (i, j, pfx,
 * c), dsunlock calls and parts of the control flow are missing.
 */
1633 do_ip4_mfib_counters (stats_main_t * sm)
1635 ip4_main_t *im4 = &ip4_main;
1636 api_main_t *am = sm->api_main;
1637 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1638 svm_queue_t *q = shmem_hdr->vl_input_queue;
1641 do_ip46_fibs_t *do_fibs;
1642 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1643 u32 items_this_message;
1644 vl_api_ip4_mfib_counter_t *ctrp = 0;
1645 u32 start_at_mfib_index = 0;
1648 do_fibs = &sm->do_ip46_fibs;
/* snapshot the mfib pool into a vector for stable iteration */
1650 vec_reset_length (do_fibs->mfibs);
1652 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1655 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1657 mfib = do_fibs->mfibs[j];
1658 /* We may have bailed out due to control-plane activity */
1659 while ((mfib - im4->mfibs) < start_at_mfib_index)
1664 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1665 mp = vl_msg_api_alloc_as_if_client
1667 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1668 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1670 mp->vrf_id = ntohl (mfib->mft_table_id);
1671 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1675 /* happens if the last MFIB was empty... */
1676 ASSERT (mp->count == 0);
1677 mp->vrf_id = ntohl (mfib->mft_table_id);
1680 vec_reset_length (do_fibs->mroutes);
1683 * walk the table with table updates blocked
1685 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1687 mfib_table_walk (mfib->mft_index,
1688 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1691 vec_foreach (pfx, do_fibs->mroutes)
1693 const dpo_id_t *dpo_id;
1694 fib_node_index_t mfei;
1699 * re-lookup the entry, since we suspend during the collection
1701 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* the route may have been deleted while we were suspended */
1703 if (FIB_NODE_INDEX_INVALID == mfei)
1706 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1707 index = (u32) dpo_id->dpoi_index;
1709 vlib_get_combined_counter (&replicate_main.repm_counters,
1710 dpo_id->dpoi_index, &c);
1712 * If it has seen at least one packet, send it.
1716 /* already in net byte order */
1717 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1718 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1719 ctrp->group_length = pfx->fp_len;
1720 ctrp->packets = clib_host_to_net_u64 (c.packets);
1721 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: wait for queue space, ship, start a new message */
1725 if (mp->count == items_this_message)
1727 mp->count = htonl (items_this_message);
1729 * If the main thread's input queue is stuffed,
1730 * drop the data structure lock (which the main thread
1731 * may want), and take a pause.
1735 while (svm_queue_is_full (q))
1737 svm_queue_unlock (q);
1738 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1739 STATS_RELEASE_DELAY_NS);
1742 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1743 svm_queue_unlock (q);
1745 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1746 mp = vl_msg_api_alloc_as_if_client
1748 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1749 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1751 mp->vrf_id = ntohl (mfib->mft_table_id);
1752 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1757 /* Flush any data from this mfib */
1760 mp->count = htonl (mp->count);
1761 vl_msg_api_send_shmem (q, (u8 *) & mp);
1766 /* If e.g. the last FIB had no reportable routes, free the buffer */
1768 vl_msg_api_free (mp);
/*
 * ip6 twin of do_ip4_mfib_counters: collect per-(S,G) replicate
 * counters for every ip6 mfib and ship them in
 * IP6_MFIB_COUNTER_BATCH_SIZE batches. Same structure: snapshot the
 * mfib pool, walk each table under the lock, re-lookup each prefix
 * (collection may suspend), batch and flush.
 * NOTE(review): extract is elided — braces, several locals and
 * dsunlock calls are missing from this view.
 */
1772 do_ip6_mfib_counters (stats_main_t * sm)
1774 ip6_main_t *im6 = &ip6_main;
1775 api_main_t *am = sm->api_main;
1776 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1777 svm_queue_t *q = shmem_hdr->vl_input_queue;
1780 do_ip46_fibs_t *do_fibs;
1781 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1782 u32 items_this_message;
1783 vl_api_ip6_mfib_counter_t *ctrp = 0;
1784 u32 start_at_mfib_index = 0;
1787 do_fibs = &sm->do_ip46_fibs;
1789 vec_reset_length (do_fibs->mfibs);
1791 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1794 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1796 mfib = do_fibs->mfibs[j];
1797 /* We may have bailed out due to control-plane activity */
1798 while ((mfib - im6->mfibs) < start_at_mfib_index)
1803 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1804 mp = vl_msg_api_alloc_as_if_client
1806 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1807 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1809 mp->vrf_id = ntohl (mfib->mft_table_id);
1810 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1814 /* happens if the last MFIB was empty... */
1815 ASSERT (mp->count == 0);
1816 mp->vrf_id = ntohl (mfib->mft_table_id);
1819 vec_reset_length (do_fibs->mroutes);
1822 * walk the table with table updates blocked
1824 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1826 mfib_table_walk (mfib->mft_index,
1827 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1830 vec_foreach (pfx, do_fibs->mroutes)
1832 const dpo_id_t *dpo_id;
1833 fib_node_index_t mfei;
1838 * re-lookup the entry, since we suspend during the collection
1840 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* the route may have been deleted while we were suspended */
1842 if (FIB_NODE_INDEX_INVALID == mfei)
1845 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1846 index = (u32) dpo_id->dpoi_index;
1848 vlib_get_combined_counter (&replicate_main.repm_counters,
1849 dpo_id->dpoi_index, &c);
1851 * If it has seen at least one packet, send it.
1855 /* already in net byte order */
1856 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1857 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1858 ctrp->group_length = pfx->fp_len;
1859 ctrp->packets = clib_host_to_net_u64 (c.packets);
1860 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: wait for queue space, ship, start a new message */
1864 if (mp->count == items_this_message)
1866 mp->count = htonl (items_this_message);
1868 * If the main thread's input queue is stuffed,
1869 * drop the data structure lock (which the main thread
1870 * may want), and take a pause.
1874 while (svm_queue_is_full (q))
1876 svm_queue_unlock (q);
1877 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1878 STATS_RELEASE_DELAY_NS);
1881 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1882 svm_queue_unlock (q);
1884 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1885 mp = vl_msg_api_alloc_as_if_client
1887 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1888 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1890 mp->vrf_id = ntohl (mfib->mft_table_id);
1891 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1896 /* Flush any data from this mfib */
1899 mp->count = htonl (mp->count);
1900 vl_msg_api_send_shmem (q, (u8 *) & mp);
1905 /* If e.g. the last FIB had no reportable routes, free the buffer */
1907 vl_msg_api_free (mp);
/*
 * Argument block for the ip6 bihash walk (typedef head is elided in
 * this extract): destination vector for matched routes plus the
 * stats_main for the abort check.
 */
1913 ip6_route_t **routep;
1915 } add_routes_in_fib_arg_t;
/*
 * Bihash key/value callback: append every route belonging to
 * ap->fib_index to *ap->routep. If the control plane wants the lock,
 * abort the whole walk via longjmp back to do_ip6_fib_counters.
 * NOTE(review): return type, braces and the ip6_route_t *r declaration
 * are elided in this extract.
 */
1918 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1920 add_routes_in_fib_arg_t *ap = arg;
1921 stats_main_t *sm = ap->sm;
/* non-local exit: the caller's clib_setjmp handles restart/backoff */
1923 if (sm->data_structure_lock->release_hint)
1924 clib_longjmp (&sm->jmp_buf, 1);
/* key[2] packs fib_index (upper 32 bits) and prefix length (low byte) */
1926 if (kvp->key[2] >> 32 == ap->fib_index)
1928 ip6_address_t *addr;
1930 addr = (ip6_address_t *) kvp;
1931 vec_add2 (*ap->routep, r, 1);
1932 r->address = addr[0];
1933 r->address_length = kvp->key[2] & 0xFF;
1934 r->index = kvp->value;
/*
 * Collect per-route combined counters for every ip6 FIB and ship them
 * in IP6_FIB_COUNTER_BATCH_SIZE batches. Routes are harvested from the
 * forwarding bihash via add_routes_in_fib; a longjmp out of that walk
 * (control-plane pressure) lands in the clib_setjmp branch below,
 * which backs off and restarts the current FIB.
 * NOTE(review): extract is heavily elided — braces, several locals
 * (i, r, c), dsunlock calls and parts of the control flow are missing.
 */
1939 do_ip6_fib_counters (stats_main_t * sm)
1941 ip6_main_t *im6 = &ip6_main;
1942 api_main_t *am = sm->api_main;
1943 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1944 svm_queue_t *q = shmem_hdr->vl_input_queue;
1947 do_ip46_fibs_t *do_fibs;
1948 vl_api_vnet_ip6_fib_counters_t *mp = 0;
1949 u32 items_this_message;
1950 vl_api_ip6_fib_counter_t *ctrp = 0;
1951 u32 start_at_fib_index = 0;
1952 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1953 add_routes_in_fib_arg_t _a, *a = &_a;
1956 do_fibs = &sm->do_ip46_fibs;
1958 vec_reset_length (do_fibs->fibs);
1960 pool_foreach (fib, im6->fibs,
1961 ({vec_add1(do_fibs->fibs,fib);}));
1965 for (i = 0; i < vec_len (do_fibs->fibs); i++)
1967 fib = do_fibs->fibs[i];
1968 /* We may have bailed out due to control-plane activity */
1969 while ((fib - im6->fibs) < start_at_fib_index)
1974 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1975 mp = vl_msg_api_alloc_as_if_client
1977 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1978 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1980 mp->vrf_id = ntohl (fib->ft_table_id);
1981 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1984 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1986 vec_reset_length (do_fibs->ip6routes);
1987 vec_reset_length (do_fibs->results);
1989 a->fib_index = fib - im6->fibs;
1990 a->routep = &do_fibs->ip6routes;
/* setjmp == 0: normal path, walk the bihash; the callback longjmps
 * here with 1 if the control plane needs the lock */
1993 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1995 start_at_fib_index = fib - im6->fibs;
1996 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
2001 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2002 STATS_RELEASE_DELAY_NS);
2004 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2008 vec_foreach (r, do_fibs->ip6routes)
2012 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2015 * If it has actually
2016 * seen at least one packet, send it.
2020 /* already in net byte order */
2021 ctrp->address[0] = r->address.as_u64[0];
2022 ctrp->address[1] = r->address.as_u64[1];
2023 ctrp->address_length = (u8) r->address_length;
2024 ctrp->packets = clib_host_to_net_u64 (c.packets);
2025 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: ship it and start a fresh message for the same vrf */
2029 if (mp->count == items_this_message)
2031 mp->count = htonl (items_this_message);
2033 * If the main thread's input queue is stuffed,
2034 * drop the data structure lock (which the main thread
2035 * may want), and take a pause.
2038 if (svm_queue_is_full (q))
2041 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2042 svm_queue_unlock (q);
2044 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2045 STATS_RELEASE_DELAY_NS);
2048 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2049 svm_queue_unlock (q);
2051 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2052 mp = vl_msg_api_alloc_as_if_client
2054 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2055 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2057 mp->vrf_id = ntohl (fib->ft_table_id);
2058 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2062 if (sm->data_structure_lock->release_hint)
2064 start_at_fib_index = fib - im6->fibs;
2066 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2068 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2071 } /* vec_foreach (routes) */
2075 /* Flush any data from this fib */
2078 mp->count = htonl (mp->count);
2079 vl_msg_api_send_shmem (q, (u8 *) & mp);
2084 /* If e.g. the last FIB had no reportable routes, free the buffer */
2086 vl_msg_api_free (mp);
/* Per-encap counter record and its walk context.
 * NOTE(review): the udp_encap_stat_t field list (id + stats pair) is
 * elided in this extract. */
2089 typedef struct udp_encap_stat_t_
2095 typedef struct udp_encap_stats_walk_t_
/* vector of per-encap stats accumulated by udp_encap_stats_walk_cb */
2097 udp_encap_stat_t *stats;
2098 } udp_encap_stats_walk_t;
/*
 * udp_encap walk callback: append one entry's id and packet/byte
 * counters to the walk context's stats vector.
 * NOTE(review): return type, braces and the stat->ue_id assignment are
 * elided in this extract.
 */
2101 udp_encap_stats_walk_cb (index_t uei, void *arg)
2103 udp_encap_stats_walk_t *ctx = arg;
2104 udp_encap_stat_t *stat;
2107 ue = udp_encap_get (uei);
2108 vec_add2 (ctx->stats, stat, 1);
2111 udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
/*
 * Drain the udp-encap counters accumulated in @ctx to the stats
 * client(s) in UDP_ENCAP_COUNTER_BATCH_SIZE batches, consuming from
 * the back of the vector (same pattern as ip6_nbr_ship).
 * NOTE(review): extract is elided — the return type, braces, the
 * stats_main/api_main lookups and the clib_memcpy head are missing.
 */
2117 udp_encap_ship (udp_encap_stats_walk_t * ctx)
2119 vl_api_vnet_udp_encap_counters_t *mp;
2120 vl_shmem_hdr_t *shmem_hdr;
2128 shmem_hdr = am->shmem_hdr;
2129 q = shmem_hdr->vl_input_queue;
2132 * If the walk context has counters, which may be left over from the last
2133 * suspend, then we continue from there.
2135 while (0 != vec_len (ctx->stats))
2137 u32 n_items = MIN (vec_len (ctx->stats),
2138 UDP_ENCAP_COUNTER_BATCH_SIZE);
2141 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2143 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2146 (vl_api_udp_encap_counter_t)));
2147 mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2148 mp->count = ntohl (n_items);
2151 * copy the counters from the back of the context, then we can easily
2152 * 'erase' them by resetting the vector length.
2153 * The order we push the stats to the caller is not important.
2156 &ctx->stats[vec_len (ctx->stats) - n_items],
2157 n_items * sizeof (*ctx->stats));
2159 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
/* if the shared-memory queue is full, back off before the next batch */
2165 pause = svm_queue_is_full (q);
2167 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2168 svm_queue_unlock (q);
2172 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2173 STATS_RELEASE_DELAY_NS);
/*
 * Walk all udp-encap objects under the data-structure lock, collecting
 * their counters into a local context, then ship them to clients.
 * NOTE(review): braces, the ctx initializer body and the dsunlock call
 * are elided in this extract.
 */
2178 do_udp_encap_counters (stats_main_t * sm)
2180 udp_encap_stat_t *stat;
2182 udp_encap_stats_walk_t ctx = {
2186 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2187 udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
2190 udp_encap_ship (&ctx);
/*
 * Set the stats poller interval (seconds). A zero delay is rejected
 * with VNET_API_ERROR_INVALID_ARGUMENT since the poller loop would
 * spin. NOTE(review): the return type line, braces and the success
 * return are elided in this extract.
 */
2194 stats_set_poller_delay (u32 poller_delay_sec)
2196 stats_main_t *sm = &stats_main;
2197 if (!poller_delay_sec)
2199 return VNET_API_ERROR_INVALID_ARGUMENT;
2203 sm->stats_poll_interval_in_seconds = poller_delay_sec;
/*
 * Startup-config handler for the "stats" section: parses
 * "interval <seconds>" and applies it via stats_set_poller_delay,
 * translating an API error into a clib_error. Unknown tokens are a
 * configuration error. NOTE(review): braces and the final `return 0;`
 * are elided in this extract.
 */
2208 static clib_error_t *
2209 stats_config (vlib_main_t * vm, unformat_input_t * input)
2213 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2215 if (unformat (input, "interval %u", &sec))
2217 int rv = stats_set_poller_delay (sec);
2220 return clib_error_return (0,
2221 "`stats_set_poller_delay' API call failed, rv=%d:%U",
2222 (int) rv, format_vnet_api_errno, rv);
2228 return clib_error_return (0, "unknown input '%U'",
2229 format_unformat_error, input);
/* Register stats_config as the handler for the startup-config
 * "stats { ... }" section. */
2235 /* stats { ... } configuration. */
2238 * @cfgcmd{interval, <seconds>}
2239 * Configure stats poller delay to be @c seconds.
2242 VLIB_CONFIG_FUNCTION (stats_config, "stats");
2245 vl_api_stats_get_poller_delay_t_handler
2246 (vl_api_stats_get_poller_delay_t * mp)
2248 stats_main_t *sm = &stats_main;
2249 vl_api_registration_t *reg;
2250 reg = vl_api_client_index_to_registration (mp->client_index);
2253 vl_api_stats_get_poller_delay_reply_t *rmp;
2255 rmp = vl_msg_api_alloc (sizeof (*rmp));
2256 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
2257 rmp->context = mp->context;
2259 rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2261 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Main loop of the dedicated stats pthread: sleep for the configured
 * poll interval, then, if the poller is enabled, run each collector
 * for which at least one client registration exists.
 * NOTE(review): extract is elided — braces, the sigset_t declaration
 * with sigfillset, the while(1) loop head and the pool_elts guards'
 * full conditions are missing from this view.
 */
2266 stats_thread_fn (void *arg)
2268 stats_main_t *sm = &stats_main;
2269 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2270 vlib_thread_main_t *tm = vlib_get_thread_main ();
2272 /* stats thread wants no signals. */
2276 pthread_sigmask (SIG_SETMASK, &s, 0);
2279 if (vec_len (tm->thread_prefix))
2280 vlib_set_thread_name ((char *)
2281 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
/* use this worker's heap for all vector allocations below */
2283 clib_mem_set_heap (w->thread_mheap);
2287 ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
2290 if (!(sm->enable_poller))
/* run only the collectors that have registered clients */
2295 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2296 do_combined_per_interface_counters (sm);
2299 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2300 do_simple_per_interface_counters (sm);
2302 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2303 do_ip4_fib_counters (sm);
2305 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2306 do_ip6_fib_counters (sm);
2308 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2309 do_ip4_mfib_counters (sm);
2311 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2312 do_ip6_mfib_counters (sm);
2314 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2315 do_ip4_nbr_counters (sm);
2317 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2318 do_ip6_nbr_counters (sm);
2320 if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
2321 do_udp_encap_counters (sm);
/*
 * Fan a VNET_INTERFACE_SIMPLE_COUNTERS message out to every registered
 * client. The 'reg_prev' pattern gives the original message to the
 * final reachable client and an allocated copy to each earlier one;
 * unreachable clients are unregistered. If nobody takes the message it
 * is freed. NOTE(review): extract is elided — braces, mp_size/i
 * declarations and parts of the loop bookkeeping are missing.
 */
2326 vl_api_vnet_interface_simple_counters_t_handler
2327 (vl_api_vnet_interface_simple_counters_t * mp)
2329 vpe_client_registration_t *clients, client;
2330 stats_main_t *sm = &stats_main;
2331 vl_api_registration_t *reg, *reg_prev = NULL;
2332 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2336 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2339 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2340 ~0 /*flag for all */ );
2342 for (i = 0; i < vec_len (clients); i++)
2344 client = clients[i];
2345 reg = vl_api_client_index_to_registration (client.client_index);
/* previous client gets the original; make a copy for the next one */
2348 if (reg_prev && vl_api_can_send_msg (reg_prev))
2350 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2351 clib_memcpy (mp_copy, mp, mp_size);
2352 vl_api_send_msg (reg_prev, (u8 *) mp);
/* client has disappeared: drop its registration */
2360 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2361 client.client_index);
2368 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2371 if (reg_prev && vl_api_can_send_msg (reg_prev))
2373 vl_api_send_msg (reg_prev, (u8 *) mp);
/* no client could take the message: release it */
2377 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP4_FIB_COUNTERS message out to every client registered
 * for ip4 FIB stats (reg_prev copy-on-fanout pattern; see the simple
 * counters handler). Unreachable clients are unregistered and the
 * poller enable flag is refreshed. NOTE(review): extract is elided —
 * braces and the mp_size/i declarations are missing.
 */
2382 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2384 stats_main_t *sm = &stats_main;
2385 vl_api_registration_t *reg, *reg_prev = NULL;
2386 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2388 vpe_client_registration_t *clients, client;
2391 mp_size = sizeof (*mp_copy) +
2392 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2395 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2397 for (i = 0; i < vec_len (clients); i++)
2399 client = clients[i];
2400 reg = vl_api_client_index_to_registration (client.client_index);
2403 if (reg_prev && vl_api_can_send_msg (reg_prev))
2405 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2406 clib_memcpy (mp_copy, mp, mp_size);
2407 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister it and recompute whether polling continues */
2414 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2415 ~0, client.client_index);
2421 if (reg_prev && vl_api_can_send_msg (reg_prev))
2423 vl_api_send_msg (reg_prev, (u8 *) mp);
2427 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP4_NBR_COUNTERS message out to every client registered
 * for ip4 neighbour stats (same reg_prev copy-on-fanout pattern).
 * NOTE(review): extract is elided — braces and the mp_size/i
 * declarations are missing.
 */
2432 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2434 stats_main_t *sm = &stats_main;
2435 vl_api_registration_t *reg, *reg_prev = NULL;
2436 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2438 vpe_client_registration_t *clients, client;
2441 mp_size = sizeof (*mp_copy) +
2442 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2445 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2447 for (i = 0; i < vec_len (clients); i++)
2449 client = clients[i];
2450 reg = vl_api_client_index_to_registration (client.client_index);
2453 if (reg_prev && vl_api_can_send_msg (reg_prev))
2455 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2456 clib_memcpy (mp_copy, mp, mp_size);
2457 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister it and recompute whether polling continues */
2464 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2465 ~0, client.client_index);
2472 if (reg_prev && vl_api_can_send_msg (reg_prev))
2474 vl_api_send_msg (reg_prev, (u8 *) mp);
2478 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP6_FIB_COUNTERS message out to every client registered
 * for ip6 FIB stats (same reg_prev copy-on-fanout pattern).
 * NOTE(review): extract is elided — braces and the mp_size/i
 * declarations are missing.
 */
2483 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2485 stats_main_t *sm = &stats_main;
2486 vl_api_registration_t *reg, *reg_prev = NULL;
2487 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2489 vpe_client_registration_t *clients, client;
2492 mp_size = sizeof (*mp_copy) +
2493 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2496 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2498 for (i = 0; i < vec_len (clients); i++)
2500 client = clients[i];
2501 reg = vl_api_client_index_to_registration (client.client_index);
2504 if (reg_prev && vl_api_can_send_msg (reg_prev))
2506 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2507 clib_memcpy (mp_copy, mp, mp_size);
2508 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister it and recompute whether polling continues */
2515 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2516 ~0, client.client_index);
2523 if (reg_prev && vl_api_can_send_msg (reg_prev))
2525 vl_api_send_msg (reg_prev, (u8 *) mp);
2529 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP6_NBR_COUNTERS message out to every client registered
 * for ip6 neighbour stats (same reg_prev copy-on-fanout pattern).
 * NOTE(review): extract is elided — braces and the mp_size/i
 * declarations are missing.
 */
2534 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2536 stats_main_t *sm = &stats_main;
2537 vl_api_registration_t *reg, *reg_prev = NULL;
2538 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2540 vpe_client_registration_t *clients, client;
2543 mp_size = sizeof (*mp_copy) +
2544 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2547 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2549 for (i = 0; i < vec_len (clients); i++)
2551 client = clients[i];
2552 reg = vl_api_client_index_to_registration (client.client_index);
2555 if (reg_prev && vl_api_can_send_msg (reg_prev))
2557 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2558 clib_memcpy (mp_copy, mp, mp_size);
2559 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister it and recompute whether polling continues */
2566 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2567 ~0, client.client_index);
2574 if (reg_prev && vl_api_can_send_msg (reg_prev))
2576 vl_api_send_msg (reg_prev, (u8 *) mp);
2580 vl_msg_api_free (mp);
/*
 * Handle WANT_UDP_ENCAP_STATS: (un)register the client for udp-encap
 * counters, then acknowledge. If the client registration cannot be
 * found, its stat registration is cleared instead.
 * NOTE(review): extract is elided — braces, the retval declaration and
 * the 'reply:'-style control flow are missing.
 */
2585 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2587 stats_main_t *sm = &stats_main;
2588 vpe_client_registration_t rp;
2589 vl_api_want_udp_encap_stats_reply_t *rmp;
2592 vl_api_registration_t *reg;
2595 fib = ~0; //Using same mechanism as _per_interface_
2596 rp.client_index = mp->client_index;
2597 rp.client_pid = mp->pid;
2599 handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2602 reg = vl_api_client_index_to_registration (mp->client_index);
/* requester vanished: undo the registration and refresh the poller */
2606 sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2607 fib, mp->client_index);
2611 rmp = vl_msg_api_alloc (sizeof (*rmp));
2612 rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2613 rmp->context = mp->context;
2614 rmp->retval = retval;
2616 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Handle the legacy WANT_STATS request: (un)register the client for
 * ALL of the classic stat streams at once (per-interface simple and
 * combined, ip4/ip6 FIB, ip4/ip6 neighbour), then acknowledge.
 * NOTE(review): extract is elided — braces, the retval declaration and
 * the !reg early-return are missing.
 */
2620 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2622 stats_main_t *sm = &stats_main;
2623 vpe_client_registration_t rp;
2624 vl_api_want_stats_reply_t *rmp;
2628 vl_api_registration_t *reg;
2630 item = ~0; //"ALL THE THINGS IN THE THINGS
2631 rp.client_index = mp->client_index;
2632 rp.client_pid = mp->pid;
2634 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2635 item, mp->enable_disable);
2637 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2638 item, mp->enable_disable);
2640 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2641 item, mp->enable_disable);
2643 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2644 item, mp->enable_disable);
2646 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2647 item, mp->enable_disable);
2649 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2650 item, mp->enable_disable);
2653 reg = vl_api_client_index_to_registration (mp->client_index);
2657 rmp = vl_msg_api_alloc (sizeof (*rmp));
2658 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2659 rmp->context = mp->context;
2660 rmp->retval = retval;
2662 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Handle WANT_INTERFACE_SIMPLE_STATS: (un)register the client for
 * simple interface counters on all interfaces (swif == ~0), then
 * acknowledge. NOTE(review): extract is elided — braces, the retval
 * declaration and the clear_client call's client argument are missing.
 */
2666 vl_api_want_interface_simple_stats_t_handler
2667 (vl_api_want_interface_simple_stats_t * mp)
2669 stats_main_t *sm = &stats_main;
2670 vpe_client_registration_t rp;
2671 vl_api_want_interface_simple_stats_reply_t *rmp;
2675 vl_api_registration_t *reg;
2677 swif = ~0; //Using same mechanism as _per_interface_
2678 rp.client_index = mp->client_index;
2679 rp.client_pid = mp->pid;
2681 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2682 mp->enable_disable);
2685 reg = vl_api_client_index_to_registration (mp->client_index);
/* requester vanished: undo the registration */
2690 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2695 rmp = vl_msg_api_alloc (sizeof (*rmp));
2696 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2697 rmp->context = mp->context;
2698 rmp->retval = retval;
2700 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Handle WANT_IP4_FIB_STATS: (un)register the client for ip4 FIB
 * counters across all FIBs (fib == ~0), then acknowledge.
 * NOTE(review): extract is elided — braces and the retval declaration
 * are missing.
 */
2705 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2707 stats_main_t *sm = &stats_main;
2708 vpe_client_registration_t rp;
2709 vl_api_want_ip4_fib_stats_reply_t *rmp;
2712 vl_api_registration_t *reg;
2715 fib = ~0; //Using same mechanism as _per_interface_
2716 rp.client_index = mp->client_index;
2717 rp.client_pid = mp->pid;
2719 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2720 mp->enable_disable);
2723 reg = vl_api_client_index_to_registration (mp->client_index);
/* requester vanished: undo the registration and refresh the poller */
2727 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2728 fib, mp->client_index);
2732 rmp = vl_msg_api_alloc (sizeof (*rmp));
2733 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2734 rmp->context = mp->context;
2735 rmp->retval = retval;
2737 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Handle WANT_IP4_MFIB_STATS: (un)register the client for ip4 mfib
 * counters across all mfibs (mfib == ~0), then acknowledge.
 * NOTE(review): extract is elided — braces and the retval declaration
 * are missing.
 */
2741 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2743 stats_main_t *sm = &stats_main;
2744 vpe_client_registration_t rp;
2745 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2748 vl_api_registration_t *reg;
2751 mfib = ~0; //Using same mechanism as _per_interface_
2752 rp.client_index = mp->client_index;
2753 rp.client_pid = mp->pid;
2755 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2756 mp->enable_disable);
2759 reg = vl_api_client_index_to_registration (mp->client_index);
/* requester vanished: undo the registration and refresh the poller */
2762 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2763 mfib, mp->client_index);
2767 rmp = vl_msg_api_alloc (sizeof (*rmp));
2768 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2769 rmp->context = mp->context;
2770 rmp->retval = retval;
2772 vl_api_send_msg (reg, (u8 *) rmp);
2776 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2778 stats_main_t *sm = &stats_main;
2779 vpe_client_registration_t rp;
2780 vl_api_want_ip4_fib_stats_reply_t *rmp;
2783 vl_api_registration_t *reg;
2786 fib = ~0; //Using same mechanism as _per_interface_
2787 rp.client_index = mp->client_index;
2788 rp.client_pid = mp->pid;
2790 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2791 mp->enable_disable);
2794 reg = vl_api_client_index_to_registration (mp->client_index);
2797 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2798 fib, mp->client_index);
2802 rmp = vl_msg_api_alloc (sizeof (*rmp));
2803 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2804 rmp->context = mp->context;
2805 rmp->retval = retval;
2807 vl_api_send_msg (reg, (u8 *) rmp);
2811 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2813 stats_main_t *sm = &stats_main;
2814 vpe_client_registration_t rp;
2815 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2818 vl_api_registration_t *reg;
2821 mfib = ~0; //Using same mechanism as _per_interface_
2822 rp.client_index = mp->client_index;
2823 rp.client_pid = mp->pid;
2825 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2826 mp->enable_disable);
2829 reg = vl_api_client_index_to_registration (mp->client_index);
2832 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2833 mfib, mp->client_index);
2837 rmp = vl_msg_api_alloc (sizeof (*rmp));
2838 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2839 rmp->context = mp->context;
2840 rmp->retval = retval;
2842 vl_api_send_msg (reg, (u8 *) rmp);
2845 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/* Stub handler: IPv4 neighbour (adjacency) stats requests are accepted
 * but ignored until the FIXME above is resolved (body elided in this
 * listing -- presumably empty). */
2847 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
/* Same stub for IPv6 neighbour stats requests. */
2852 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/*
 * VNET_GET_SUMMARY_STATS handler: sum RX/TX packet and byte totals
 * across every combined software-interface counter and send them, plus
 * the last per-node vector length, back to the requesting client.
 * NOTE(review): elided listing -- loop braces, some declarations (the
 * counter value "v", loop index "i", "which") and a probable
 * "if (!reg) return;" guard are not visible here.
 */
2857 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2859 stats_main_t *sm = &stats_main;
2860 vnet_interface_main_t *im = sm->interface_main;
2861 vl_api_vnet_get_summary_stats_reply_t *rmp;
2862 vlib_combined_counter_main_t *cm;
/* One accumulator per direction, indexed by VLIB_RX / VLIB_TX */
2865 u64 total_pkts[VLIB_N_RX_TX];
2866 u64 total_bytes[VLIB_N_RX_TX];
2867 vl_api_registration_t *reg;
2869 reg = vl_api_client_index_to_registration (mp->client_index);
2873 rmp = vl_msg_api_alloc (sizeof (*rmp));
2874 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2875 rmp->context = mp->context;
2878 memset (total_pkts, 0, sizeof (total_pkts));
2879 memset (total_bytes, 0, sizeof (total_bytes));
/* Hold the counter lock while reading so the totals are consistent */
2881 vnet_interface_counter_lock (im);
2883 vec_foreach (cm, im->combined_sw_if_counters)
/* "which" is the direction index derived from the vector position */
2885 which = cm - im->combined_sw_if_counters;
2887 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2889 vlib_get_combined_counter (cm, i, &v);
2890 total_pkts[which] += v.packets;
2891 total_bytes[which] += v.bytes;
2894 vnet_interface_counter_unlock (im);
/* Totals are sent in network byte order */
2896 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2897 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2898 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2899 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
/* presumably assigned to rmp->vector_rate in the full source (the
 * lvalue line is elided) -- TODO confirm */
2901 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2903 vl_api_send_msg (reg, (u8 *) rmp);
/* API memory-client delete callback. The per-client registration
 * cleanup below is currently commented out (dead code kept as a
 * reference for the intended behavior). */
2907 stats_memclnt_delete_callback (u32 client_index)
2909 vpe_client_stats_registration_t *rp;
2910 stats_main_t *sm = &stats_main;
/* Intended cleanup (disabled): look up, free and unhash the client's
 * registration record. */
2914 /* p = hash_get (sm->stats_registration_hash, client_index); */
2917 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2918 /* pool_put (sm->stats_registrations, rp); */
2919 /* hash_unset (sm->stats_registration_hash, client_index); */
/* Counter messages carry variable-length payloads and are produced by
 * this module already in network byte order, so the generated endian
 * and print handlers are stubbed out with vl_noop_handler. */
2925 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
2926 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
2927 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
2928 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
2929 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
2930 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
2931 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
2932 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
2933 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
2934 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
2935 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
2936 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/*
 * Module init: register message handlers, mark counter messages as
 * non-bounce (they are forwarded to clients), build the message-id
 * table, and size the per-stat registration pools and hashes.
 * NOTE(review): elided listing -- braces, the "#define _(N,n)" macro
 * header line and the foreach_stats_msg expansion/#undef are not
 * visible here.
 */
2938 static clib_error_t *
2939 stats_init (vlib_main_t * vm)
2941 stats_main_t *sm = &stats_main;
2942 api_main_t *am = &api_main;
2943 void *vlib_worker_thread_bootstrap_fn (void *arg);
2946 sm->vnet_main = vnet_get_main ();
2947 sm->interface_main = &vnet_get_main ()->interface_main;
/* Default stats scrape interval, in seconds */
2949 sm->stats_poll_interval_in_seconds = 10;
/* Cache-line-aligned lock shared by the stats thread and API handlers */
2950 sm->data_structure_lock =
2951 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
2952 CLIB_CACHE_LINE_BYTES);
2953 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Body of the handler-registration macro, expanded over
 * foreach_stats_msg (its "#define _(N,n)" header is elided) */
2956 vl_msg_api_set_handlers(VL_API_##N, #n, \
2957 vl_api_##n##_t_handler, \
2959 vl_api_##n##_t_endian, \
2960 vl_api_##n##_t_print, \
2961 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
2965 /* tell the msg infra not to free these messages... */
2966 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
2967 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
2968 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
2969 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
2970 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
2971 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
2974 * Set up the (msg_name, crc, message-id) table
2976 setup_message_id_table (am);
/* One registration pool + hash slot per stat index listed in stats.reg */
2978 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
2979 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
2980 #define stats_reg(n) \
2981 sm->stats_registrations[IDX_##n] = 0; \
2982 sm->stats_registration_hash[IDX_##n] = 0;
2983 #include <vpp/stats/stats.reg>
/* Run stats_init during vlib startup */
2989 VLIB_INIT_FUNCTION (stats_init);
/* Register the dedicated stats pthread (entry point stats_thread_fn);
 * it must not clone the main thread's data structures.
 * NOTE(review): initializer is truncated in this listing -- members
 * such as .name and the closing brace are not shown. */
2992 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
2994 .function = stats_thread_fn,
2997 .no_data_structure_clone = 1,
3003 * fd.io coding-style-patch-verification: ON
3006 * eval: (c-set-style "gnu")