/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
25 stats_main_t stats_main;
27 #include <vnet/ip/ip.h>
29 #include <vpp/api/vpe_msg_enum.h>
32 #define f64_print(a,b)
34 #define vl_typedefs /* define message structures */
35 #include <vpp/api/vpe_all_api_h.h>
38 #define vl_endianfun /* define message structures */
39 #include <vpp/api/vpe_all_api_h.h>
42 /* instantiate all the print functions we know about */
43 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
45 #include <vpp/api/vpe_all_api_h.h>
/* Dispatch table: one (MESSAGE_ID, handler_suffix) pair for every API
 * message this stats process handles.  Expanded with a user-supplied
 * _() macro to declare, register, and name the handlers. */
#define foreach_stats_msg						\
_(WANT_STATS, want_stats)						\
_(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters)	\
_(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats)		\
_(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters)	\
_(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats)		\
_(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats)	\
_(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats)	\
_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters)				\
_(WANT_IP4_FIB_STATS, want_ip4_fib_stats)				\
_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)				\
_(WANT_IP6_FIB_STATS, want_ip6_fib_stats)				\
_(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats)				\
_(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats)				\
_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters)				\
_(WANT_IP4_NBR_STATS, want_ip4_nbr_stats)				\
_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)				\
_(WANT_IP6_NBR_STATS, want_ip6_nbr_stats)				\
_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)			\
_(STATS_GET_POLLER_DELAY, stats_get_poller_delay)			\
_(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)
70 #define vl_msg_name_crc_list
71 #include <vpp/stats/stats.api.h>
72 #undef vl_msg_name_crc_list
75 setup_message_id_table (api_main_t * am)
78 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
79 foreach_vl_msg_name_crc_stats;
/* These constants ensure msg sizes <= 1024, aka ring allocation */
#define SIMPLE_COUNTER_BATCH_SIZE	126
#define COMBINED_COUNTER_BATCH_SIZE	63
#define IP4_FIB_COUNTER_BATCH_SIZE	48
#define IP6_FIB_COUNTER_BATCH_SIZE	30
#define IP4_MFIB_COUNTER_BATCH_SIZE	24
#define IP6_MFIB_COUNTER_BATCH_SIZE	15
#define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))

/* Back-off delay used when the shared-memory ring is full: 5 ms. */
#define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
97 format_vnet_interface_combined_counters (u8 * s, va_list * args)
99 stats_main_t *sm = &stats_main;
100 vl_api_vnet_interface_combined_counters_t *mp =
101 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
104 u32 count, sw_if_index;
106 count = ntohl (mp->count);
107 sw_if_index = ntohl (mp->first_sw_if_index);
111 vp = (vlib_counter_t *) mp->data;
113 switch (mp->vnet_counter_type)
115 case VNET_INTERFACE_COUNTER_RX:
118 case VNET_INTERFACE_COUNTER_TX:
122 counter_name = "bogus";
125 for (i = 0; i < count; i++)
127 packets = clib_mem_unaligned (&vp->packets, u64);
128 packets = clib_net_to_host_u64 (packets);
129 bytes = clib_mem_unaligned (&vp->bytes, u64);
130 bytes = clib_net_to_host_u64 (bytes);
132 s = format (s, "%U.%s.packets %lld\n",
133 format_vnet_sw_if_index_name,
134 sm->vnet_main, sw_if_index, counter_name, packets);
135 s = format (s, "%U.%s.bytes %lld\n",
136 format_vnet_sw_if_index_name,
137 sm->vnet_main, sw_if_index, counter_name, bytes);
144 format_vnet_interface_simple_counters (u8 * s, va_list * args)
146 stats_main_t *sm = &stats_main;
147 vl_api_vnet_interface_simple_counters_t *mp =
148 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
150 u32 count, sw_if_index;
151 count = ntohl (mp->count);
152 sw_if_index = ntohl (mp->first_sw_if_index);
154 vp = (u64 *) mp->data;
157 switch (mp->vnet_counter_type)
159 case VNET_INTERFACE_COUNTER_DROP:
160 counter_name = "drop";
162 case VNET_INTERFACE_COUNTER_PUNT:
163 counter_name = "punt";
165 case VNET_INTERFACE_COUNTER_IP4:
166 counter_name = "ip4";
168 case VNET_INTERFACE_COUNTER_IP6:
169 counter_name = "ip6";
171 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
172 counter_name = "rx-no-buff";
174 case VNET_INTERFACE_COUNTER_RX_MISS:
175 counter_name = "rx-miss";
177 case VNET_INTERFACE_COUNTER_RX_ERROR:
178 counter_name = "rx-error (fifo-full)";
180 case VNET_INTERFACE_COUNTER_TX_ERROR:
181 counter_name = "tx-error (fifo-full)";
184 counter_name = "bogus";
187 for (i = 0; i < count; i++)
189 v = clib_mem_unaligned (vp, u64);
190 v = clib_net_to_host_u64 (v);
192 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
193 sm->vnet_main, sw_if_index, counter_name, v);
201 dslock (stats_main_t * sm, int release_hint, int tag)
204 data_structure_lock_t *l = sm->data_structure_lock;
206 if (PREDICT_FALSE (l == 0))
209 thread_index = vlib_get_thread_index ();
210 if (l->lock && l->thread_index == thread_index)
219 while (__sync_lock_test_and_set (&l->lock, 1))
222 l->thread_index = thread_index;
227 stats_dslock_with_hint (int hint, int tag)
229 stats_main_t *sm = &stats_main;
230 dslock (sm, hint, tag);
234 dsunlock (stats_main_t * sm)
237 data_structure_lock_t *l = sm->data_structure_lock;
239 if (PREDICT_FALSE (l == 0))
242 thread_index = vlib_get_thread_index ();
243 ASSERT (l->lock && l->thread_index == thread_index);
249 CLIB_MEMORY_BARRIER ();
255 stats_dsunlock (int hint, int tag)
257 stats_main_t *sm = &stats_main;
261 static vpe_client_registration_t *
262 get_client_for_stat (u32 reg, u32 item, u32 client_index)
264 stats_main_t *sm = &stats_main;
265 vpe_client_stats_registration_t *registration;
268 /* Is there anything listening for item in that reg */
269 p = hash_get (sm->stats_registration_hash[reg], item);
274 /* If there is, is our client_index one of them */
275 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
276 p = hash_get (registration->client_hash, client_index);
281 return pool_elt_at_index (registration->clients, p[0]);
286 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
288 stats_main_t *sm = &stats_main;
289 vpe_client_stats_registration_t *registration;
290 vpe_client_registration_t *cr;
293 /* Is there anything listening for item in that reg */
294 p = hash_get (sm->stats_registration_hash[reg], item);
298 pool_get (sm->stats_registrations[reg], registration);
299 registration->item = item;
300 hash_set (sm->stats_registration_hash[reg], item,
301 registration - sm->stats_registrations[reg]);
305 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
308 p = hash_get (registration->client_hash, client->client_index);
312 pool_get (registration->clients, cr);
313 cr->client_index = client->client_index;
314 cr->client_pid = client->client_pid;
315 hash_set (registration->client_hash, cr->client_index,
316 cr - registration->clients);
319 return 1; //At least one client is doing something ... poll
323 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
325 stats_main_t *sm = &stats_main;
326 vpe_client_stats_registration_t *registration;
327 vpe_client_registration_t *client;
331 /* Clear the client first */
332 /* Is there anything listening for item in that reg */
333 p = hash_get (sm->stats_registration_hash[reg], item);
338 /* If there is, is our client_index one of them */
339 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
340 p = hash_get (registration->client_hash, client_index);
345 client = pool_elt_at_index (registration->clients, p[0]);
346 hash_unset (registration->client_hash, client->client_index);
347 pool_put (registration->clients, client);
349 /* Now check if that was the last client for that item */
350 if (0 == pool_elts (registration->clients))
352 hash_unset (sm->stats_registration_hash[reg], item);
353 pool_put (sm->stats_registrations[reg], registration);
358 /* Now check if that was the last item in any of the listened to stats */
359 for (i = 0; i < STATS_REG_N_IDX; i++)
361 elts += pool_elts (sm->stats_registrations[i]);
366 vpe_client_registration_t *
367 get_clients_for_stat (u32 reg, u32 item)
369 stats_main_t *sm = &stats_main;
370 vpe_client_registration_t *client, *clients = 0;
371 vpe_client_stats_registration_t *registration;
374 /* Is there anything listening for item in that reg */
375 p = hash_get (sm->stats_registration_hash[reg], item);
380 /* If there is, is our client_index one of them */
381 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
383 vec_reset_length (clients);
384 pool_foreach (client, registration->clients, (
386 vec_add1 (clients, *client);}
393 clear_client_reg (u32 ** registrations)
395 /* When registrations[x] is a vector of pool indices
396 here is a good place to clean up the pools
398 #define stats_reg(n) vec_free(registrations[IDX_##n]);
399 #include <vpp/stats/stats.reg>
402 vec_free (registrations);
406 init_client_reg (u32 ** registrations)
410 Initialise the stats registrations for each
411 type of stat a client can register for as well as
412 a vector of "interested" indexes.
413 Initially this is a u32 of either sw_if_index or fib_index
414 but eventually this should migrate to a pool_index (u32)
415 with a type specific pool that can include more complex things
416 such as timing and structured events.
418 vec_validate (registrations, STATS_REG_N_IDX);
419 #define stats_reg(n) \
420 vec_reset_length(registrations[IDX_##n]);
421 #include <vpp/stats/stats.reg>
425 When registrations[x] is a vector of pool indices, here
426 is a good place to init the pools.
428 return registrations;
432 enable_all_client_reg (u32 ** registrations)
436 Enable all stats known by adding
437 ~0 to the index vector. Eventually this
438 should be deprecated.
440 #define stats_reg(n) \
441 vec_add1(registrations[IDX_##n], ~0);
442 #include <vpp/stats/stats.reg>
444 return registrations;
448 do_simple_interface_counters (stats_main_t * sm)
450 vl_api_vnet_interface_simple_counters_t *mp = 0;
451 vnet_interface_main_t *im = sm->interface_main;
452 api_main_t *am = sm->api_main;
453 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
454 svm_queue_t *q = shmem_hdr->vl_input_queue;
455 vlib_simple_counter_main_t *cm;
456 u32 items_this_message = 0;
461 * Prevent interface registration from expanding / moving the vectors...
462 * That tends never to happen, so we can hold this lock for a while.
464 vnet_interface_counter_lock (im);
466 vec_foreach (cm, im->sw_if_counters)
468 n_counts = vlib_simple_counter_n_counters (cm);
469 for (i = 0; i < n_counts; i++)
473 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
476 mp = vl_msg_api_alloc_as_if_client
477 (sizeof (*mp) + items_this_message * sizeof (v));
478 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
479 mp->vnet_counter_type = cm - im->sw_if_counters;
480 mp->first_sw_if_index = htonl (i);
482 vp = (u64 *) mp->data;
484 v = vlib_get_simple_counter (cm, i);
485 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
488 if (mp->count == items_this_message)
490 mp->count = htonl (items_this_message);
491 /* Send to the main thread... */
492 vl_msg_api_send_shmem (q, (u8 *) & mp);
498 vnet_interface_counter_unlock (im);
502 handle_client_registration (vpe_client_registration_t * client, u32 stat,
503 u32 item, int enable_disable)
505 stats_main_t *sm = &stats_main;
506 vpe_client_registration_t *rp, _rp;
508 rp = get_client_for_stat (stat, item, client->client_index);
511 if (enable_disable == 0)
513 if (!rp) // No client to disable
515 clib_warning ("pid %d: already disabled for stats...",
520 clear_client_for_stat (stat, item, client->client_index);
527 rp->client_index = client->client_index;
528 rp->client_pid = client->client_pid;
529 sm->enable_poller = set_client_for_stat (stat, item, rp);
534 /**********************************
535 * ALL Interface Combined stats - to be deprecated
536 **********************************/
539 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
542 vl_api_want_interface_combined_stats_t_handler
543 (vl_api_want_interface_combined_stats_t * mp)
545 stats_main_t *sm = &stats_main;
546 vpe_client_registration_t rp;
547 vl_api_want_interface_combined_stats_reply_t *rmp;
550 vl_api_registration_t *reg;
553 swif = ~0; //Using same mechanism as _per_interface_
554 rp.client_index = mp->client_index;
555 rp.client_pid = mp->pid;
557 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
561 reg = vl_api_client_index_to_registration (mp->client_index);
565 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
570 rmp = vl_msg_api_alloc (sizeof (*rmp));
571 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
572 rmp->context = mp->context;
573 rmp->retval = retval;
575 vl_api_send_msg (reg, (u8 *) rmp);
579 vl_api_vnet_interface_combined_counters_t_handler
580 (vl_api_vnet_interface_combined_counters_t * mp)
582 vpe_client_registration_t *clients, client;
583 stats_main_t *sm = &stats_main;
584 vl_api_registration_t *reg, *reg_prev = NULL;
585 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
589 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
592 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
593 ~0 /*flag for all */ );
595 for (i = 0; i < vec_len (clients); i++)
598 reg = vl_api_client_index_to_registration (client.client_index);
601 if (reg_prev && vl_api_can_send_msg (reg_prev))
603 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
604 clib_memcpy (mp_copy, mp, mp_size);
605 vl_api_send_msg (reg_prev, (u8 *) mp);
612 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
615 if (reg_prev && vl_api_can_send_msg (reg_prev))
617 vl_api_send_msg (reg_prev, (u8 *) mp);
621 vl_msg_api_free (mp);
626 do_combined_interface_counters (stats_main_t * sm)
628 vl_api_vnet_interface_combined_counters_t *mp = 0;
629 vnet_interface_main_t *im = sm->interface_main;
630 api_main_t *am = sm->api_main;
631 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
632 svm_queue_t *q = shmem_hdr->vl_input_queue;
633 vlib_combined_counter_main_t *cm;
634 u32 items_this_message = 0;
635 vlib_counter_t v, *vp = 0;
638 vnet_interface_counter_lock (im);
640 vec_foreach (cm, im->combined_sw_if_counters)
642 n_counts = vlib_combined_counter_n_counters (cm);
643 for (i = 0; i < n_counts; i++)
647 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
650 mp = vl_msg_api_alloc_as_if_client
651 (sizeof (*mp) + items_this_message * sizeof (v));
652 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
653 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
654 mp->first_sw_if_index = htonl (i);
656 vp = (vlib_counter_t *) mp->data;
658 vlib_get_combined_counter (cm, i, &v);
659 clib_mem_unaligned (&vp->packets, u64)
660 = clib_host_to_net_u64 (v.packets);
661 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
664 if (mp->count == items_this_message)
666 mp->count = htonl (items_this_message);
667 /* Send to the main thread... */
668 vl_msg_api_send_shmem (q, (u8 *) & mp);
674 vnet_interface_counter_unlock (im);
677 /**********************************
678 * Per Interface Combined stats
679 **********************************/
681 /* Request from client registering interfaces it wants */
683 vl_api_want_per_interface_combined_stats_t_handler
684 (vl_api_want_per_interface_combined_stats_t * mp)
686 stats_main_t *sm = &stats_main;
687 vpe_client_registration_t rp;
688 vl_api_want_per_interface_combined_stats_reply_t *rmp;
689 vlib_combined_counter_main_t *cm;
692 vl_api_registration_t *reg;
693 u32 i, swif, num = 0;
695 num = ntohl (mp->num);
698 * Validate sw_if_indexes before registering
700 for (i = 0; i < num; i++)
702 swif = ntohl (mp->sw_ifs[i]);
705 * Check its a real sw_if_index that the client is allowed to see
709 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
711 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
717 for (i = 0; i < num; i++)
719 swif = ntohl (mp->sw_ifs[i]);
721 rp.client_index = mp->client_index;
722 rp.client_pid = mp->pid;
723 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
724 swif, ntohl (mp->enable_disable));
728 reg = vl_api_client_index_to_registration (mp->client_index);
731 for (i = 0; i < num; i++)
733 swif = ntohl (mp->sw_ifs[i]);
736 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
742 rmp = vl_msg_api_alloc (sizeof (*rmp));
743 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
744 rmp->context = mp->context;
745 rmp->retval = retval;
747 vl_api_send_msg (reg, (u8 *) rmp);
750 /* Per Interface Combined distribution to client */
752 do_combined_per_interface_counters (stats_main_t * sm)
754 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
755 vnet_interface_main_t *im = sm->interface_main;
756 api_main_t *am = sm->api_main;
757 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
758 vl_api_registration_t *vl_reg;
759 vlib_combined_counter_main_t *cm;
760 vl_api_vnet_combined_counter_t *vp = 0;
763 vpe_client_stats_registration_t *reg;
764 vpe_client_registration_t *client;
765 u32 *sw_if_index = 0;
767 vnet_interface_counter_lock (im);
769 vec_reset_length (sm->regs_tmp);
773 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
774 ({ vec_add1 (sm->regs_tmp, reg); }));
777 for (i = 0; i < vec_len (sm->regs_tmp); i++)
779 reg = sm->regs_tmp[i];
782 vnet_interface_counter_unlock (im);
783 do_combined_interface_counters (sm);
784 vnet_interface_counter_lock (im);
787 vec_reset_length (sm->clients_tmp);
790 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
794 for (j = 0; j < vec_len (sm->clients_tmp); j++)
796 client = sm->clients_tmp[j];
798 vl_reg = vl_api_client_index_to_registration (client->client_index);
800 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
804 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
805 reg->item, client->client_index);
808 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
809 memset (mp, 0, sizeof (*mp));
812 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
815 * count will eventually be used to optimise the batching
816 * of per client messages for each stat. For now setting this to 1 then
817 * iterate. This will not affect API.
819 * FIXME instead of enqueueing here, this should be sent to a batch
820 * storer for per-client transmission. Each "mp" sent would be a single entry
821 * and if a client is listening to other sw_if_indexes for same, it would be
822 * appended to that *mp
826 * - capturing the timestamp of the counters "when VPP knew them" is important.
827 * Less so is that the timing of the delivery to the control plane be in the same
830 * i.e. As long as the control plane can delta messages from VPP and work out
831 * velocity etc based on the timestamp, it can do so in a more "batch mode".
833 * It would be beneficial to keep a "per-client" message queue, and then
834 * batch all the stat messages for a client into one message, with
835 * discrete timestamps.
837 * Given this particular API is for "per interface" one assumes that the scale
838 * is less than the ~0 case, which the prior API is suited for.
842 * 1 message per api call for now
844 mp->count = htonl (1);
845 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
847 vp = (vl_api_vnet_combined_counter_t *) mp->data;
848 vp->sw_if_index = htonl (reg->item);
850 im = &vnet_get_main ()->interface_main;
851 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
852 vlib_get_combined_counter (cm, reg->item, &v);
853 clib_mem_unaligned (&vp->rx_packets, u64) =
854 clib_host_to_net_u64 (v.packets);
855 clib_mem_unaligned (&vp->rx_bytes, u64) =
856 clib_host_to_net_u64 (v.bytes);
857 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
858 vlib_get_combined_counter (cm, reg->item, &v);
859 clib_mem_unaligned (&vp->tx_packets, u64) =
860 clib_host_to_net_u64 (v.packets);
861 clib_mem_unaligned (&vp->tx_bytes, u64) =
862 clib_host_to_net_u64 (v.bytes);
864 vl_api_send_msg (vl_reg, (u8 *) mp);
868 vnet_interface_counter_unlock (im);
871 /**********************************
872 * Per Interface simple stats
873 **********************************/
875 /* Request from client registering interfaces it wants */
877 vl_api_want_per_interface_simple_stats_t_handler
878 (vl_api_want_per_interface_simple_stats_t * mp)
880 stats_main_t *sm = &stats_main;
881 vpe_client_registration_t rp;
882 vl_api_want_per_interface_simple_stats_reply_t *rmp;
883 vlib_simple_counter_main_t *cm;
886 vl_api_registration_t *reg;
887 u32 i, swif, num = 0;
889 num = ntohl (mp->num);
891 for (i = 0; i < num; i++)
893 swif = ntohl (mp->sw_ifs[i]);
895 /* Check its a real sw_if_index that the client is allowed to see */
898 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
900 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
906 for (i = 0; i < num; i++)
908 swif = ntohl (mp->sw_ifs[i]);
910 rp.client_index = mp->client_index;
911 rp.client_pid = mp->pid;
912 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
913 swif, ntohl (mp->enable_disable));
917 reg = vl_api_client_index_to_registration (mp->client_index);
919 /* Client may have disconnected abruptly, clean up */
922 for (i = 0; i < num; i++)
924 swif = ntohl (mp->sw_ifs[i]);
926 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
934 rmp = vl_msg_api_alloc (sizeof (*rmp));
935 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
936 rmp->context = mp->context;
937 rmp->retval = retval;
939 vl_api_send_msg (reg, (u8 *) rmp);
942 /* Per Interface Simple distribution to client */
944 do_simple_per_interface_counters (stats_main_t * sm)
946 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
947 vnet_interface_main_t *im = sm->interface_main;
948 api_main_t *am = sm->api_main;
949 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
950 vl_api_registration_t *vl_reg;
951 vlib_simple_counter_main_t *cm;
953 vpe_client_stats_registration_t *reg;
954 vpe_client_registration_t *client;
955 u32 timestamp, count;
956 vl_api_vnet_simple_counter_t *vp = 0;
959 vnet_interface_counter_lock (im);
961 vec_reset_length (sm->regs_tmp);
965 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
966 ({ vec_add1 (sm->regs_tmp, reg); }));
969 for (i = 0; i < vec_len (sm->regs_tmp); i++)
971 reg = sm->regs_tmp[i];
974 vnet_interface_counter_unlock (im);
975 do_simple_interface_counters (sm);
976 vnet_interface_counter_lock (im);
979 vec_reset_length (sm->clients_tmp);
982 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
986 for (j = 0; j < vec_len (sm->clients_tmp); j++)
988 client = sm->clients_tmp[j];
989 vl_reg = vl_api_client_index_to_registration (client->client_index);
991 /* Client may have disconnected abrubtly, clean up */
995 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
996 reg->item, client->client_index);
1000 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1001 memset (mp, 0, sizeof (*mp));
1002 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1005 * count will eventually be used to optimise the batching
1006 * of per client messages for each stat. For now setting this to 1 then
1007 * iterate. This will not affect API.
1009 * FIXME instead of enqueueing here, this should be sent to a batch
1010 * storer for per-client transmission. Each "mp" sent would be a single entry
1011 * and if a client is listening to other sw_if_indexes for same, it would be
1012 * appended to that *mp
1016 * - capturing the timestamp of the counters "when VPP knew them" is important.
1017 * Less so is that the timing of the delivery to the control plane be in the same
1020 * i.e. As long as the control plane can delta messages from VPP and work out
1021 * velocity etc based on the timestamp, it can do so in a more "batch mode".
1023 * It would be beneficial to keep a "per-client" message queue, and then
1024 * batch all the stat messages for a client into one message, with
1025 * discrete timestamps.
1027 * Given this particular API is for "per interface" one assumes that the scale
1028 * is less than the ~0 case, which the prior API is suited for.
1032 * 1 message per api call for now
1034 mp->count = htonl (1);
1035 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1036 vp = (vl_api_vnet_simple_counter_t *) mp->data;
1038 vp->sw_if_index = htonl (reg->item);
1040 // VNET_INTERFACE_COUNTER_DROP
1041 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1042 v = vlib_get_simple_counter (cm, reg->item);
1043 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1045 // VNET_INTERFACE_COUNTER_PUNT
1046 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1047 v = vlib_get_simple_counter (cm, reg->item);
1048 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1050 // VNET_INTERFACE_COUNTER_IP4
1051 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1052 v = vlib_get_simple_counter (cm, reg->item);
1053 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1055 //VNET_INTERFACE_COUNTER_IP6
1056 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1057 v = vlib_get_simple_counter (cm, reg->item);
1058 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1060 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1061 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1062 v = vlib_get_simple_counter (cm, reg->item);
1063 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1064 clib_host_to_net_u64 (v);
1066 //VNET_INTERFACE_COUNTER_RX_MISS
1067 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1068 v = vlib_get_simple_counter (cm, reg->item);
1069 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1071 //VNET_INTERFACE_COUNTER_RX_ERROR
1072 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1073 v = vlib_get_simple_counter (cm, reg->item);
1074 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1076 //VNET_INTERFACE_COUNTER_TX_ERROR
1077 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1078 v = vlib_get_simple_counter (cm, reg->item);
1079 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1081 //VNET_INTERFACE_COUNTER_MPLS
1082 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1083 v = vlib_get_simple_counter (cm, reg->item);
1084 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1086 vl_api_send_msg (vl_reg, (u8 *) mp);
1090 vnet_interface_counter_unlock (im);
/**********************************
 * FIB stats
 **********************************/
1098 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1100 struct timespec _req, *req = &_req;
1101 struct timespec _rem, *rem = &_rem;
1104 req->tv_nsec = nsec;
1107 if (nanosleep (req, rem) == 0)
1112 clib_unix_warning ("nanosleep");
1118 * @brief The context passed when collecting adjacency counters
1120 typedef struct ip4_nbr_stats_ctx_t_
1123 * The SW IF index all these adjs belong to
1128 * A vector of ip4 nbr counters
1130 vl_api_ip4_nbr_counter_t *counters;
1131 } ip4_nbr_stats_ctx_t;
1133 static adj_walk_rc_t
1134 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1136 vl_api_ip4_nbr_counter_t *vl_counter;
1137 vlib_counter_t adj_counter;
1138 ip4_nbr_stats_ctx_t *ctx;
1139 ip_adjacency_t *adj;
1142 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
1144 if (0 != adj_counter.packets)
1146 vec_add2 (ctx->counters, vl_counter, 1);
1149 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1150 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1151 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1152 vl_counter->link_type = adj->ia_link;
1154 return (ADJ_WALK_RC_CONTINUE);
1157 #define MIN(x,y) (((x)<(y))?(x):(y))
1160 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1162 api_main_t *am = sm->api_main;
1163 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1164 svm_queue_t *q = shmem_hdr->vl_input_queue;
1165 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1169 * If the walk context has counters, which may be left over from the last
1170 * suspend, then we continue from there.
1172 while (0 != vec_len (ctx->counters))
1174 u32 n_items = MIN (vec_len (ctx->counters),
1175 IP4_FIB_COUNTER_BATCH_SIZE);
1178 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1180 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1183 (vl_api_ip4_nbr_counter_t)));
1184 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1185 mp->count = ntohl (n_items);
1186 mp->sw_if_index = ntohl (ctx->sw_if_index);
1191 * copy the counters from the back of the context, then we can easily
1192 * 'erase' them by resetting the vector length.
1193 * The order we push the stats to the caller is not important.
1196 &ctx->counters[vec_len (ctx->counters) - n_items],
1197 n_items * sizeof (*ctx->counters));
1199 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1205 pause = svm_queue_is_full (q);
1207 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1208 svm_queue_unlock (q);
1212 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1213 STATS_RELEASE_DELAY_NS);
1218 do_ip4_nbr_counters (stats_main_t * sm)
1220 vnet_main_t *vnm = vnet_get_main ();
1221 vnet_interface_main_t *im = &vnm->interface_main;
1222 vnet_sw_interface_t *si;
1224 ip4_nbr_stats_ctx_t ctx = {
1230 pool_foreach (si, im->sw_interfaces,
1233 * update the interface we are now concerned with
1235 ctx.sw_if_index = si->sw_if_index;
1238 * we are about to walk another interface, so we shouldn't have any pending
1241 ASSERT(ctx.counters == NULL);
1244 * visit each neighbour adjacency on the interface and collect
1245 * its current stats.
1246 * Because we hold the lock the walk is synchronous, so safe to routing
1247 * updates. It's limited in work by the number of adjacenies on an
1248 * interface, which is typically not huge.
1250 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1251 adj_nbr_walk (si->sw_if_index,
1258 * if this interface has some adjacencies with counters then ship them,
1259 * else continue to the next interface.
1261 if (NULL != ctx.counters)
1263 ip4_nbr_ship(sm, &ctx);
1270 * @brief The context passed when collecting adjacency counters
1272 typedef struct ip6_nbr_stats_ctx_t_
1275 * The SW IF index all these adjs belong to
1280 * A vector of ip6 nbr counters
1282 vl_api_ip6_nbr_counter_t *counters;
1283 } ip6_nbr_stats_ctx_t;
1285 static adj_walk_rc_t
1286 ip6_nbr_stats_cb (adj_index_t ai,
1289 vl_api_ip6_nbr_counter_t *vl_counter;
1290 vlib_counter_t adj_counter;
1291 ip6_nbr_stats_ctx_t *ctx;
1292 ip_adjacency_t *adj;
1295 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
1297 if (0 != adj_counter.packets)
1299 vec_add2(ctx->counters, vl_counter, 1);
1302 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1303 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1304 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1305 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1306 vl_counter->link_type = adj->ia_link;
1308 return (ADJ_WALK_RC_CONTINUE);
/* Drain the collected IPv6 neighbour counters to the stats client in
 * batches of at most IP6_FIB_COUNTER_BATCH_SIZE, taking counters from the
 * BACK of the vector so "erasing" them is a cheap length reset.  If the
 * client's shared-memory queue is full, the data-structure lock is dropped
 * and the thread pauses before retrying.
 * NOTE(review): MIN() evaluates an argument twice -- harmless here since
 * both arguments are side-effect free.  Verbatim, partially-elided code. */
1311 #define MIN(x,y) (((x)<(y))?(x):(y))
1314 ip6_nbr_ship (stats_main_t * sm,
1315 ip6_nbr_stats_ctx_t *ctx)
1317 api_main_t *am = sm->api_main;
1318 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1319 svm_queue_t *q = shmem_hdr->vl_input_queue;
1320 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1324 * If the walk context has counters, which may be left over from the last
1325 * suspend, then we continue from there.
1327 while (0 != vec_len(ctx->counters))
1329 u32 n_items = MIN (vec_len (ctx->counters),
1330 IP6_FIB_COUNTER_BATCH_SIZE);
1333 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
/* Message sized for exactly n_items trailing counter records. */
1335 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1338 (vl_api_ip6_nbr_counter_t)));
1339 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1340 mp->count = ntohl (n_items);
1341 mp->sw_if_index = ntohl (ctx->sw_if_index);
1346 * copy the counters from the back of the context, then we can easily
1347 * 'erase' them by resetting the vector length.
1348 * The order we push the stats to the caller is not important.
1351 &ctx->counters[vec_len (ctx->counters) - n_items],
1352 n_items * sizeof (*ctx->counters));
1354 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Check for back-pressure BEFORE sending; pause afterwards if needed. */
1360 pause = svm_queue_is_full (q);
1362 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1363 svm_queue_unlock (q);
1367 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1368 STATS_RELEASE_DELAY_NS);
/* Per-poll entry point for IPv6 neighbour counters: for every software
 * interface, walk its neighbour adjacencies under the data-structure lock
 * (so the walk is safe against routing updates) and ship any non-zero
 * counters to the registered clients.  Mirrors do_ip4_nbr_counters above.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
1373 do_ip6_nbr_counters (stats_main_t * sm)
1375 vnet_main_t *vnm = vnet_get_main ();
1376 vnet_interface_main_t *im = &vnm->interface_main;
1377 vnet_sw_interface_t *si;
1379 ip6_nbr_stats_ctx_t ctx = {
1385 pool_foreach (si, im->sw_interfaces,
1388 * update the interface we are now concerned with
1390 ctx.sw_if_index = si->sw_if_index;
/* ip6_nbr_ship() must have fully drained the vector on the previous
 * iteration, otherwise counters would be attributed to the wrong
 * interface. */
1393 * we are about to walk another interface, so we shouldn't have any pending
1396 ASSERT(ctx.counters == NULL);
1399 * visit each neighbour adjacency on the interface and collect
1400 * its current stats.
1401 * Because we hold the lock the walk is synchronous, so safe to routing
1402 * updates. It's limited in work by the number of adjacenies on an
1403 * interface, which is typically not huge.
1405 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1406 adj_nbr_walk (si->sw_if_index,
1413 * if this interface has some adjacencies with counters then ship them,
1414 * else continue to the next interface.
1416 if (NULL != ctx.counters)
1418 ip6_nbr_ship(sm, &ctx);
/* Per-poll collection of IPv4 FIB (route) counters.  For each v4 FIB:
 * snapshot the routes (under dslock), then, for each route, read the
 * load-balance combined counter and append it to the current API message.
 * Messages are flushed every IP4_FIB_COUNTER_BATCH_SIZE items.  If the
 * main thread signals via release_hint, collection bails out, sleeps, and
 * restarts at start_at_fib_index ("We may have bailed out" below).
 * NOTE(review): verbatim reproduction of a partially-elided extraction;
 * missing brace/guard lines were dropped by the extractor, not the author. */
1425 do_ip4_fib_counters (stats_main_t * sm)
1427 ip4_main_t *im4 = &ip4_main;
1428 api_main_t *am = sm->api_main;
1429 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1430 svm_queue_t *q = shmem_hdr->vl_input_queue;
1434 do_ip46_fibs_t *do_fibs;
1435 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1436 u32 items_this_message;
1437 vl_api_ip4_fib_counter_t *ctrp = 0;
1438 u32 start_at_fib_index = 0;
1441 do_fibs = &sm->do_ip46_fibs;
/* Snapshot the FIB pool into a plain vector so we can iterate while
 * suspending/resuming without holding pool iterators across a pause. */
1444 vec_reset_length (do_fibs->fibs);
1446 pool_foreach (fib, im4->fibs,
1447 ({vec_add1(do_fibs->fibs,fib);}));
1451 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1453 fib = do_fibs->fibs[j];
1454 /* We may have bailed out due to control-plane activity */
1455 while ((fib - im4->fibs) < start_at_fib_index)
1458 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1462 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1463 mp = vl_msg_api_alloc_as_if_client
1465 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1466 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1468 mp->vrf_id = ntohl (fib->ft_table_id);
1469 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1473 /* happens if the last FIB was empty... */
1474 ASSERT (mp->count == 0);
1475 mp->vrf_id = ntohl (fib->ft_table_id);
1478 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1480 vec_reset_length (do_fibs->ip4routes);
1481 vec_reset_length (do_fibs->results);
/* Walk every prefix-length bucket of the mtrie hash tables and collect
 * (address, length, fib-entry-index) tuples into ip4routes. */
1483 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1485 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1489 vec_reset_length (do_fibs->pvec);
1491 x.address_length = i;
1493 hash_foreach_pair (p, hash, (
1495 vec_add1 (do_fibs->pvec, p);}
1497 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1499 p = do_fibs->pvec[k];
1500 x.address.data_u32 = p->key;
1501 x.index = p->value[0];
1503 vec_add1 (do_fibs->ip4routes, x);
/* Control plane wants the lock: remember where we were, pause, retry. */
1504 if (sm->data_structure_lock->release_hint)
1506 start_at_fib_index = fib - im4->fibs;
1508 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1509 STATS_RELEASE_DELAY_NS);
1511 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1517 vec_foreach (r, do_fibs->ip4routes)
1520 const dpo_id_t *dpo_id;
1523 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1524 index = (u32) dpo_id->dpoi_index;
1526 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1529 * If it has actually
1530 * seen at least one packet, send it.
1535 /* already in net byte order */
1536 ctrp->address = r->address.as_u32;
1537 ctrp->address_length = r->address_length;
1538 ctrp->packets = clib_host_to_net_u64 (c.packets);
1539 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1543 if (mp->count == items_this_message)
1545 mp->count = htonl (items_this_message);
1547 * If the main thread's input queue is stuffed,
1548 * drop the data structure lock (which the main thread
1549 * may want), and take a pause.
1552 if (svm_queue_is_full (q))
1555 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1556 svm_queue_unlock (q);
1558 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1559 STATS_RELEASE_DELAY_NS);
1562 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1563 svm_queue_unlock (q);
/* Start a fresh message for the remaining routes of this FIB. */
1565 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1566 mp = vl_msg_api_alloc_as_if_client
1568 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1569 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1571 mp->vrf_id = ntohl (fib->ft_table_id);
1572 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1574 } /* for each (mp or single) adj */
1575 if (sm->data_structure_lock->release_hint)
1577 start_at_fib_index = fib - im4->fibs;
1579 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1581 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1584 } /* vec_foreach (routes) */
1588 /* Flush any data from this fib */
1591 mp->count = htonl (mp->count);
1592 vl_msg_api_send_shmem (q, (u8 *) & mp);
1597 /* If e.g. the last FIB had no reportable routes, free the buffer */
1599 vl_msg_api_free (mp);
/* mfib_table_walk() callback shared by the ip4 and ip6 mfib collectors:
 * record the entry's prefix so the caller can re-look it up later (the
 * collector may suspend between walk and counter read).
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
1603 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1605 stats_main_t *sm = ctx;
1606 do_ip46_fibs_t *do_fibs;
1607 mfib_entry_t *entry;
1609 do_fibs = &sm->do_ip46_fibs;
1610 entry = mfib_entry_get (fei);
1612 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/* Per-poll collection of IPv4 multicast-FIB counters.  For each mfib:
 * snapshot the prefixes (walk under dslock), then re-look up each prefix
 * (it may have been deleted while we were suspended) and report its
 * replicate-DPO combined counter.  Batched like do_ip4_fib_counters.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
1618 do_ip4_mfib_counters (stats_main_t * sm)
1620 ip4_main_t *im4 = &ip4_main;
1621 api_main_t *am = sm->api_main;
1622 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1623 svm_queue_t *q = shmem_hdr->vl_input_queue;
1626 do_ip46_fibs_t *do_fibs;
1627 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1628 u32 items_this_message;
1629 vl_api_ip4_mfib_counter_t *ctrp = 0;
1630 u32 start_at_mfib_index = 0;
1633 do_fibs = &sm->do_ip46_fibs;
1635 vec_reset_length (do_fibs->mfibs);
1637 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1640 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1642 mfib = do_fibs->mfibs[j];
1643 /* We may have bailed out due to control-plane activity */
1644 while ((mfib - im4->mfibs) < start_at_mfib_index)
1649 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1650 mp = vl_msg_api_alloc_as_if_client
1652 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1653 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1655 mp->vrf_id = ntohl (mfib->mft_table_id);
1656 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1660 /* happens if the last MFIB was empty... */
1661 ASSERT (mp->count == 0);
1662 mp->vrf_id = ntohl (mfib->mft_table_id);
1665 vec_reset_length (do_fibs->mroutes);
1668 * walk the table with table updates blocked
1670 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1672 mfib_table_walk (mfib->mft_index,
1673 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1676 vec_foreach (pfx, do_fibs->mroutes)
1678 const dpo_id_t *dpo_id;
1679 fib_node_index_t mfei;
1684 * re-lookup the entry, since we suspend during the collection
1686 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* Entry deleted while we slept: skip it. */
1688 if (FIB_NODE_INDEX_INVALID == mfei)
1691 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1692 index = (u32) dpo_id->dpoi_index;
1694 vlib_get_combined_counter (&replicate_main.repm_counters,
1695 dpo_id->dpoi_index, &c);
1697 * If it has seen at least one packet, send it.
1701 /* already in net byte order */
1702 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1703 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1704 ctrp->group_length = pfx->fp_len;
1705 ctrp->packets = clib_host_to_net_u64 (c.packets);
1706 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1710 if (mp->count == items_this_message)
1712 mp->count = htonl (items_this_message);
1714 * If the main thread's input queue is stuffed,
1715 * drop the data structure lock (which the main thread
1716 * may want), and take a pause.
1720 while (svm_queue_is_full (q))
1722 svm_queue_unlock (q);
1723 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1724 STATS_RELEASE_DELAY_NS);
1727 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1728 svm_queue_unlock (q);
1730 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1731 mp = vl_msg_api_alloc_as_if_client
1733 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1734 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1736 mp->vrf_id = ntohl (mfib->mft_table_id);
1737 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1742 /* Flush any data from this mfib */
1745 mp->count = htonl (mp->count);
1746 vl_msg_api_send_shmem (q, (u8 *) & mp);
1751 /* If e.g. the last FIB had no reportable routes, free the buffer */
1753 vl_msg_api_free (mp);
/* IPv6 twin of do_ip4_mfib_counters: identical batching/suspend structure,
 * differing only in address width (16-byte group/source copies) and the
 * IP6 message/batch-size constants.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
1757 do_ip6_mfib_counters (stats_main_t * sm)
1759 ip6_main_t *im6 = &ip6_main;
1760 api_main_t *am = sm->api_main;
1761 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1762 svm_queue_t *q = shmem_hdr->vl_input_queue;
1765 do_ip46_fibs_t *do_fibs;
1766 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1767 u32 items_this_message;
1768 vl_api_ip6_mfib_counter_t *ctrp = 0;
1769 u32 start_at_mfib_index = 0;
1772 do_fibs = &sm->do_ip46_fibs;
1774 vec_reset_length (do_fibs->mfibs);
1776 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1779 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1781 mfib = do_fibs->mfibs[j];
1782 /* We may have bailed out due to control-plane activity */
1783 while ((mfib - im6->mfibs) < start_at_mfib_index)
1788 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1789 mp = vl_msg_api_alloc_as_if_client
1791 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1792 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1794 mp->vrf_id = ntohl (mfib->mft_table_id);
1795 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1799 /* happens if the last MFIB was empty... */
1800 ASSERT (mp->count == 0);
1801 mp->vrf_id = ntohl (mfib->mft_table_id);
1804 vec_reset_length (do_fibs->mroutes);
1807 * walk the table with table updates blocked
1809 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1811 mfib_table_walk (mfib->mft_index,
1812 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1815 vec_foreach (pfx, do_fibs->mroutes)
1817 const dpo_id_t *dpo_id;
1818 fib_node_index_t mfei;
1823 * re-lookup the entry, since we suspend during the collection
1825 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* Entry deleted while we slept: skip it. */
1827 if (FIB_NODE_INDEX_INVALID == mfei)
1830 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1831 index = (u32) dpo_id->dpoi_index;
1833 vlib_get_combined_counter (&replicate_main.repm_counters,
1834 dpo_id->dpoi_index, &c);
1836 * If it has seen at least one packet, send it.
1840 /* already in net byte order */
1841 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1842 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1843 ctrp->group_length = pfx->fp_len;
1844 ctrp->packets = clib_host_to_net_u64 (c.packets);
1845 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1849 if (mp->count == items_this_message)
1851 mp->count = htonl (items_this_message);
1853 * If the main thread's input queue is stuffed,
1854 * drop the data structure lock (which the main thread
1855 * may want), and take a pause.
1859 while (svm_queue_is_full (q))
1861 svm_queue_unlock (q);
1862 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1863 STATS_RELEASE_DELAY_NS);
1866 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1867 svm_queue_unlock (q);
1869 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1870 mp = vl_msg_api_alloc_as_if_client
1872 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1873 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1875 mp->vrf_id = ntohl (mfib->mft_table_id);
1876 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1881 /* Flush any data from this mfib */
1884 mp->count = htonl (mp->count);
1885 vl_msg_api_send_shmem (q, (u8 *) & mp);
1890 /* If e.g. the last FIB had no reportable routes, free the buffer */
1892 vl_msg_api_free (mp);
/* Tail of the add_routes_in_fib() argument struct (the opening lines were
 * elided by extraction): output route vector + owning stats_main. */
1898 ip6_route_t **routep;
1900 } add_routes_in_fib_arg_t;
/* Bihash walk callback for the ip6 forwarding table: collect every route
 * belonging to ap->fib_index into *ap->routep.  If the control plane wants
 * the lock, bail out of the whole walk via longjmp back to
 * do_ip6_fib_counters' setjmp point.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
1903 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1905 add_routes_in_fib_arg_t *ap = arg;
1906 stats_main_t *sm = ap->sm;
1908 if (sm->data_structure_lock->release_hint)
1909 clib_longjmp (&sm->jmp_buf, 1);
/* key[2] packs fib_index in the high 32 bits and prefix len in low byte. */
1911 if (kvp->key[2] >> 32 == ap->fib_index)
1913 ip6_address_t *addr;
1915 addr = (ip6_address_t *) kvp;
1916 vec_add2 (*ap->routep, r, 1);
1917 r->address = addr[0];
1918 r->address_length = kvp->key[2] & 0xFF;
1919 r->index = kvp->value;
/* Per-poll collection of IPv6 FIB counters.  Unlike the ip4 variant, the
 * route snapshot walks the ip6 forwarding bihash; a setjmp/longjmp pair
 * lets add_routes_in_fib() abort mid-walk when the control plane wants
 * the data-structure lock, after which the whole FIB restarts.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
1924 do_ip6_fib_counters (stats_main_t * sm)
1926 ip6_main_t *im6 = &ip6_main;
1927 api_main_t *am = sm->api_main;
1928 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1929 svm_queue_t *q = shmem_hdr->vl_input_queue;
1932 do_ip46_fibs_t *do_fibs;
1933 vl_api_vnet_ip6_fib_counters_t *mp = 0;
1934 u32 items_this_message;
1935 vl_api_ip6_fib_counter_t *ctrp = 0;
1936 u32 start_at_fib_index = 0;
1937 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1938 add_routes_in_fib_arg_t _a, *a = &_a;
1941 do_fibs = &sm->do_ip46_fibs;
1943 vec_reset_length (do_fibs->fibs);
1945 pool_foreach (fib, im6->fibs,
1946 ({vec_add1(do_fibs->fibs,fib);}));
1950 for (i = 0; i < vec_len (do_fibs->fibs); i++)
1952 fib = do_fibs->fibs[i];
1953 /* We may have bailed out due to control-plane activity */
1954 while ((fib - im6->fibs) < start_at_fib_index)
1959 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1960 mp = vl_msg_api_alloc_as_if_client
1962 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1963 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1965 mp->vrf_id = ntohl (fib->ft_table_id);
1966 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1969 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1971 vec_reset_length (do_fibs->ip6routes);
1972 vec_reset_length (do_fibs->results);
1974 a->fib_index = fib - im6->fibs;
1975 a->routep = &do_fibs->ip6routes;
/* setjmp returns 0 on the direct call and 1 when add_routes_in_fib()
 * longjmps out because the control plane needs the lock. */
1978 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1980 start_at_fib_index = fib - im6->fibs;
1981 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
1986 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1987 STATS_RELEASE_DELAY_NS);
1989 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1993 vec_foreach (r, do_fibs->ip6routes)
1997 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2000 * If it has actually
2001 * seen at least one packet, send it.
2005 /* already in net byte order */
2006 ctrp->address[0] = r->address.as_u64[0];
2007 ctrp->address[1] = r->address.as_u64[1];
2008 ctrp->address_length = (u8) r->address_length;
2009 ctrp->packets = clib_host_to_net_u64 (c.packets);
2010 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2014 if (mp->count == items_this_message)
2016 mp->count = htonl (items_this_message);
2018 * If the main thread's input queue is stuffed,
2019 * drop the data structure lock (which the main thread
2020 * may want), and take a pause.
2023 if (svm_queue_is_full (q))
2026 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2027 svm_queue_unlock (q);
2029 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2030 STATS_RELEASE_DELAY_NS);
2033 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2034 svm_queue_unlock (q);
2036 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2037 mp = vl_msg_api_alloc_as_if_client
2039 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2040 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2042 mp->vrf_id = ntohl (fib->ft_table_id);
2043 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2047 if (sm->data_structure_lock->release_hint)
2049 start_at_fib_index = fib - im6->fibs;
2051 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2053 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2056 } /* vec_foreach (routes) */
2060 /* Flush any data from this fib */
2063 mp->count = htonl (mp->count);
2064 vl_msg_api_send_shmem (q, (u8 *) & mp);
2069 /* If e.g. the last FIB had no reportable routes, free the buffer */
2071 vl_msg_api_free (mp);
/* Walk state for UDP-encap counter collection: one udp_encap_stat_t per
 * encap object (interior fields elided by extraction), accumulated into
 * the stats vector below. */
2074 typedef struct udp_encap_stat_t_
2080 typedef struct udp_encap_stats_walk_t_
2082 udp_encap_stat_t *stats;
2083 } udp_encap_stats_walk_t;
/* udp_encap_walk() callback: append one entry per encap object, reading
 * its two counters (presumably packets/bytes -- TODO confirm against
 * udp_encap_get_stats) into stat->stats[0..1].
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2086 udp_encap_stats_walk_cb (index_t uei, void *arg)
2088 udp_encap_stats_walk_t *ctx = arg;
2089 udp_encap_stat_t *stat;
2092 ue = udp_encap_get (uei);
2093 vec_add2 (ctx->stats, stat, 1);
2096 udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
/* Ship collected UDP-encap counters to the stats client in batches of at
 * most UDP_ENCAP_COUNTER_BATCH_SIZE, same drain-from-the-back scheme as
 * ip6_nbr_ship().
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2102 udp_encap_ship (udp_encap_stats_walk_t * ctx)
2104 vl_api_vnet_udp_encap_counters_t *mp;
2105 vl_shmem_hdr_t *shmem_hdr;
2113 shmem_hdr = am->shmem_hdr;
2114 q = shmem_hdr->vl_input_queue;
2117 * If the walk context has counters, which may be left over from the last
2118 * suspend, then we continue from there.
2120 while (0 != vec_len (ctx->stats))
2122 u32 n_items = MIN (vec_len (ctx->stats),
2123 UDP_ENCAP_COUNTER_BATCH_SIZE);
2126 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2128 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2131 (vl_api_udp_encap_counter_t)));
2132 mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2133 mp->count = ntohl (n_items);
2136 * copy the counters from the back of the context, then we can easily
2137 * 'erase' them by resetting the vector length.
2138 * The order we push the stats to the caller is not important.
2141 &ctx->stats[vec_len (ctx->stats) - n_items],
2142 n_items * sizeof (*ctx->stats));
2144 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
/* Back-pressure check before send; pause afterwards if the queue is full. */
2150 pause = svm_queue_is_full (q);
2152 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2153 svm_queue_unlock (q);
2157 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2158 STATS_RELEASE_DELAY_NS);
/* Per-poll entry point for UDP-encap counters: walk all encap objects
 * under the data-structure lock, then ship the collected vector.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2163 do_udp_encap_counters (stats_main_t * sm)
2165 udp_encap_stat_t *stat;
2167 udp_encap_stats_walk_t ctx = {
2171 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2172 udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
2175 udp_encap_ship (&ctx);
/* Set the stats poller interval (seconds).  Zero is rejected with
 * VNET_API_ERROR_INVALID_ARGUMENT since it would spin the poller. */
2179 stats_set_poller_delay (u32 poller_delay_sec)
2181 stats_main_t *sm = &stats_main;
2182 if (!poller_delay_sec)
2184 return VNET_API_ERROR_INVALID_ARGUMENT;
2188 sm->stats_poll_interval_in_seconds = poller_delay_sec;
/* startup.conf handler for the "stats { interval <seconds> }" stanza;
 * delegates validation to stats_set_poller_delay() and surfaces its
 * error code through clib_error_return.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2193 static clib_error_t *
2194 stats_config (vlib_main_t * vm, unformat_input_t * input)
2198 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2200 if (unformat (input, "interval %u", &sec))
2202 int rv = stats_set_poller_delay (sec);
2205 return clib_error_return (0,
2206 "`stats_set_poller_delay' API call failed, rv=%d:%U",
2207 (int) rv, format_vnet_api_errno, rv);
2213 return clib_error_return (0, "unknown input '%U'",
2214 format_unformat_error, input);
2220 /* stats { ... } configuration. */
2223 * @cfgcmd{interval, <seconds>}
2224 * Configure stats poller delay to be @c seconds.
2227 VLIB_CONFIG_FUNCTION (stats_config, "stats");
2230 vl_api_stats_get_poller_delay_t_handler
2231 (vl_api_stats_get_poller_delay_t * mp)
2233 stats_main_t *sm = &stats_main;
2234 vl_api_registration_t *reg;
2235 reg = vl_api_client_index_to_registration (mp->client_index);
2238 vl_api_stats_get_poller_delay_reply_t *rmp;
2240 rmp = vl_msg_api_alloc (sizeof (*rmp));
2241 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
2242 rmp->context = mp->context;
2244 rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2246 vl_api_send_msg (reg, (u8 *) rmp);
/* Main loop of the dedicated stats pthread: sleep for the configured
 * interval, then, if polling is enabled, run each collector for which at
 * least one client registration exists.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2251 stats_thread_fn (void *arg)
2253 stats_main_t *sm = &stats_main;
2254 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2255 vlib_thread_main_t *tm = vlib_get_thread_main ();
2257 /* stats thread wants no signals. */
2261 pthread_sigmask (SIG_SETMASK, &s, 0);
2264 if (vec_len (tm->thread_prefix))
2265 vlib_set_thread_name ((char *)
2266 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
/* Use this worker's heap for all vec/pool allocation in the collectors. */
2268 clib_mem_set_heap (w->thread_mheap);
2272 ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
2275 if (!(sm->enable_poller))
/* One registration pool per stat index; empty pool => no subscribers,
 * so skip that collector entirely. */
2280 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2281 do_combined_per_interface_counters (sm);
2284 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2285 do_simple_per_interface_counters (sm);
2287 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2288 do_ip4_fib_counters (sm);
2290 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2291 do_ip6_fib_counters (sm);
2293 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2294 do_ip4_mfib_counters (sm);
2296 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2297 do_ip6_mfib_counters (sm);
2299 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2300 do_ip4_nbr_counters (sm);
2302 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2303 do_ip6_nbr_counters (sm);
2305 if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
2306 do_udp_encap_counters (sm);
/* Fan a VNET_INTERFACE_SIMPLE_COUNTERS message out to every registered
 * client.  Standard fan-out trick: keep the ORIGINAL mp for the last
 * recipient and send deep copies (mp_copy) to the earlier ones, so exactly
 * one send consumes mp; clients that have disappeared are unregistered.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2311 vl_api_vnet_interface_simple_counters_t_handler
2312 (vl_api_vnet_interface_simple_counters_t * mp)
2314 vpe_client_registration_t *clients, client;
2315 stats_main_t *sm = &stats_main;
2316 vl_api_registration_t *reg, *reg_prev = NULL;
2317 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
/* Trailing data is mp->count u64 counter values. */
2321 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2324 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2325 ~0 /*flag for all */ );
2327 for (i = 0; i < vec_len (clients); i++)
2329 client = clients[i];
2330 reg = vl_api_client_index_to_registration (client.client_index);
2333 if (reg_prev && vl_api_can_send_msg (reg_prev))
2335 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2336 clib_memcpy (mp_copy, mp, mp_size);
2337 vl_api_send_msg (reg_prev, (u8 *) mp);
/* Dead client: drop its registration so we stop collecting for it. */
2345 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2346 client.client_index);
2352 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
/* Last live client gets the original message; otherwise free it. */
2355 if (reg_prev && vl_api_can_send_msg (reg_prev))
2357 vl_api_send_msg (reg_prev, (u8 *) mp);
2361 vl_msg_api_free (mp);
/* Fan an IP4_FIB_COUNTERS message out to all registered clients (same
 * copy-all-but-last pattern as the simple-counters handler above); a dead
 * client's registration is cleared and enable_poller recomputed.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2366 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2368 stats_main_t *sm = &stats_main;
2369 vl_api_registration_t *reg, *reg_prev = NULL;
2370 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2372 vpe_client_registration_t *clients, client;
2375 mp_size = sizeof (*mp_copy) +
2376 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2379 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2381 for (i = 0; i < vec_len (clients); i++)
2383 client = clients[i];
2384 reg = vl_api_client_index_to_registration (client.client_index);
2387 if (reg_prev && vl_api_can_send_msg (reg_prev))
2389 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2390 clib_memcpy (mp_copy, mp, mp_size);
2391 vl_api_send_msg (reg_prev, (u8 *) mp);
2398 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2399 ~0, client.client_index);
2404 if (reg_prev && vl_api_can_send_msg (reg_prev))
2406 vl_api_send_msg (reg_prev, (u8 *) mp);
2410 vl_msg_api_free (mp);
/* Fan an IP4_NBR_COUNTERS message out to all registered clients; same
 * structure as the ip4-fib handler above.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2415 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2417 stats_main_t *sm = &stats_main;
2418 vl_api_registration_t *reg, *reg_prev = NULL;
2419 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2421 vpe_client_registration_t *clients, client;
2424 mp_size = sizeof (*mp_copy) +
2425 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2428 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2430 for (i = 0; i < vec_len (clients); i++)
2432 client = clients[i];
2433 reg = vl_api_client_index_to_registration (client.client_index);
2436 if (reg_prev && vl_api_can_send_msg (reg_prev))
2438 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2439 clib_memcpy (mp_copy, mp, mp_size);
2440 vl_api_send_msg (reg_prev, (u8 *) mp);
2447 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2448 ~0, client.client_index);
2454 if (reg_prev && vl_api_can_send_msg (reg_prev))
2456 vl_api_send_msg (reg_prev, (u8 *) mp);
2460 vl_msg_api_free (mp);
/* Fan an IP6_FIB_COUNTERS message out to all registered clients; same
 * structure as the ip4-fib handler above.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2465 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2467 stats_main_t *sm = &stats_main;
2468 vl_api_registration_t *reg, *reg_prev = NULL;
2469 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2471 vpe_client_registration_t *clients, client;
2474 mp_size = sizeof (*mp_copy) +
2475 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2478 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2480 for (i = 0; i < vec_len (clients); i++)
2482 client = clients[i];
2483 reg = vl_api_client_index_to_registration (client.client_index);
2486 if (reg_prev && vl_api_can_send_msg (reg_prev))
2488 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2489 clib_memcpy (mp_copy, mp, mp_size);
2490 vl_api_send_msg (reg_prev, (u8 *) mp);
2497 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2498 ~0, client.client_index);
2503 if (reg_prev && vl_api_can_send_msg (reg_prev))
2505 vl_api_send_msg (reg_prev, (u8 *) mp);
2509 vl_msg_api_free (mp);
/* Fan an IP6_NBR_COUNTERS message out to all registered clients; same
 * structure as the ip4-nbr handler above.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2514 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2516 stats_main_t *sm = &stats_main;
2517 vl_api_registration_t *reg, *reg_prev = NULL;
2518 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2520 vpe_client_registration_t *clients, client;
2523 mp_size = sizeof (*mp_copy) +
2524 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2527 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2529 for (i = 0; i < vec_len (clients); i++)
2531 client = clients[i];
2532 reg = vl_api_client_index_to_registration (client.client_index);
2535 if (reg_prev && vl_api_can_send_msg (reg_prev))
2537 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2538 clib_memcpy (mp_copy, mp, mp_size);
2539 vl_api_send_msg (reg_prev, (u8 *) mp);
2546 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2547 ~0, client.client_index);
2552 if (reg_prev && vl_api_can_send_msg (reg_prev))
2554 vl_api_send_msg (reg_prev, (u8 *) mp);
2558 vl_msg_api_free (mp);
/* Register/unregister a client for UDP-encap counters, then reply.
 * If the client registration cannot be found the stat is cleared and
 * enable_poller recomputed (lines elided by extraction).
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2563 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2565 stats_main_t *sm = &stats_main;
2566 vpe_client_registration_t rp;
2567 vl_api_want_udp_encap_stats_reply_t *rmp;
2570 vl_api_registration_t *reg;
2573 fib = ~0; //Using same mechanism as _per_interface_
2574 rp.client_index = mp->client_index;
2575 rp.client_pid = mp->pid;
2577 handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2580 reg = vl_api_client_index_to_registration (mp->client_index);
2584 sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2585 fib, mp->client_index);
2589 rmp = vl_msg_api_alloc (sizeof (*rmp));
2590 rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2591 rmp->context = mp->context;
2592 rmp->retval = retval;
2594 vl_api_send_msg (reg, (u8 *) rmp);
/* Legacy WANT_STATS: (un)register the client for all six classic stat
 * streams at once (simple+combined interface, ip4/ip6 fib, ip4/ip6 nbr),
 * then reply.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2598 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2600 stats_main_t *sm = &stats_main;
2601 vpe_client_registration_t rp;
2602 vl_api_want_stats_reply_t *rmp;
2606 vl_api_registration_t *reg;
2608 item = ~0; //"ALL THE THINGS IN THE THINGS
2609 rp.client_index = mp->client_index;
2610 rp.client_pid = mp->pid;
2612 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2613 item, mp->enable_disable);
2615 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2616 item, mp->enable_disable);
2618 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2619 item, mp->enable_disable);
2621 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2622 item, mp->enable_disable);
2624 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2625 item, mp->enable_disable);
2627 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2628 item, mp->enable_disable);
2631 reg = vl_api_client_index_to_registration (mp->client_index);
2635 rmp = vl_msg_api_alloc (sizeof (*rmp));
2636 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2637 rmp->context = mp->context;
2638 rmp->retval = retval;
2640 vl_api_send_msg (reg, (u8 *) rmp);
/* Register/unregister a client for interface simple counters (all
 * interfaces, swif == ~0), then reply.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2644 vl_api_want_interface_simple_stats_t_handler
2645 (vl_api_want_interface_simple_stats_t * mp)
2647 stats_main_t *sm = &stats_main;
2648 vpe_client_registration_t rp;
2649 vl_api_want_interface_simple_stats_reply_t *rmp;
2653 vl_api_registration_t *reg;
2655 swif = ~0; //Using same mechanism as _per_interface_
2656 rp.client_index = mp->client_index;
2657 rp.client_pid = mp->pid;
2659 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2660 mp->enable_disable);
2663 reg = vl_api_client_index_to_registration (mp->client_index);
2668 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2673 rmp = vl_msg_api_alloc (sizeof (*rmp));
2674 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2675 rmp->context = mp->context;
2676 rmp->retval = retval;
2678 vl_api_send_msg (reg, (u8 *) rmp);
/* Register/unregister a client for IP4 FIB counters (all FIBs, fib == ~0),
 * then reply.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2683 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2685 stats_main_t *sm = &stats_main;
2686 vpe_client_registration_t rp;
2687 vl_api_want_ip4_fib_stats_reply_t *rmp;
2690 vl_api_registration_t *reg;
2693 fib = ~0; //Using same mechanism as _per_interface_
2694 rp.client_index = mp->client_index;
2695 rp.client_pid = mp->pid;
2697 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2698 mp->enable_disable);
2701 reg = vl_api_client_index_to_registration (mp->client_index);
2705 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2706 fib, mp->client_index);
2710 rmp = vl_msg_api_alloc (sizeof (*rmp));
2711 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2712 rmp->context = mp->context;
2713 rmp->retval = retval;
2715 vl_api_send_msg (reg, (u8 *) rmp);
/* Register/unregister a client for IP4 multicast-FIB counters (all mfibs,
 * mfib == ~0), then reply.
 * NOTE(review): verbatim reproduction of a partially-elided extraction. */
2719 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2721 stats_main_t *sm = &stats_main;
2722 vpe_client_registration_t rp;
2723 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2726 vl_api_registration_t *reg;
2729 mfib = ~0; //Using same mechanism as _per_interface_
2730 rp.client_index = mp->client_index;
2731 rp.client_pid = mp->pid;
2733 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2734 mp->enable_disable);
2737 reg = vl_api_client_index_to_registration (mp->client_index);
2740 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2741 mfib, mp->client_index);
2745 rmp = vl_msg_api_alloc (sizeof (*rmp));
2746 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2747 rmp->context = mp->context;
2748 rmp->retval = retval;
2750 vl_api_send_msg (reg, (u8 *) rmp);
2754 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2756 stats_main_t *sm = &stats_main;
2757 vpe_client_registration_t rp;
2758 vl_api_want_ip4_fib_stats_reply_t *rmp;
2761 vl_api_registration_t *reg;
2764 fib = ~0; //Using same mechanism as _per_interface_
2765 rp.client_index = mp->client_index;
2766 rp.client_pid = mp->pid;
2768 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2769 mp->enable_disable);
2772 reg = vl_api_client_index_to_registration (mp->client_index);
2775 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2776 fib, mp->client_index);
2780 rmp = vl_msg_api_alloc (sizeof (*rmp));
2781 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2782 rmp->context = mp->context;
2783 rmp->retval = retval;
2785 vl_api_send_msg (reg, (u8 *) rmp);
2789 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2791 stats_main_t *sm = &stats_main;
2792 vpe_client_registration_t rp;
2793 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2796 vl_api_registration_t *reg;
2799 mfib = ~0; //Using same mechanism as _per_interface_
2800 rp.client_index = mp->client_index;
2801 rp.client_pid = mp->pid;
2803 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2804 mp->enable_disable);
2807 reg = vl_api_client_index_to_registration (mp->client_index);
2810 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2811 mfib, mp->client_index);
2815 rmp = vl_msg_api_alloc (sizeof (*rmp));
2816 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2817 rmp->context = mp->context;
2818 rmp->retval = retval;
2820 vl_api_send_msg (reg, (u8 *) rmp);
2823 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/* Intentional no-op stub: WANT_IP4_NBR_STATS requests are accepted and
 * silently discarded until neighbour stats collection is repaired.
 * NOTE(review): no reply is sent to the client -- confirm callers
 * tolerate the missing WANT_IP4_NBR_STATS_REPLY. */
2825 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
/* Intentional no-op stub (see FIXME above the ip4 variant in the full
 * file): WANT_IP6_NBR_STATS requests are accepted and silently
 * discarded; no reply is sent. */
2830 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
2835 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2837 stats_main_t *sm = &stats_main;
2838 vnet_interface_main_t *im = sm->interface_main;
2839 vl_api_vnet_get_summary_stats_reply_t *rmp;
2840 vlib_combined_counter_main_t *cm;
2843 u64 total_pkts[VLIB_N_RX_TX];
2844 u64 total_bytes[VLIB_N_RX_TX];
2845 vl_api_registration_t *reg;
2847 reg = vl_api_client_index_to_registration (mp->client_index);
2851 rmp = vl_msg_api_alloc (sizeof (*rmp));
2852 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2853 rmp->context = mp->context;
2856 memset (total_pkts, 0, sizeof (total_pkts));
2857 memset (total_bytes, 0, sizeof (total_bytes));
2859 vnet_interface_counter_lock (im);
2861 vec_foreach (cm, im->combined_sw_if_counters)
2863 which = cm - im->combined_sw_if_counters;
2865 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2867 vlib_get_combined_counter (cm, i, &v);
2868 total_pkts[which] += v.packets;
2869 total_bytes[which] += v.bytes;
2872 vnet_interface_counter_unlock (im);
2874 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2875 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2876 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2877 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
2879 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2881 vl_api_send_msg (reg, (u8 *) rmp);
/* Callback invoked by the API infra when a shared-memory client
 * disconnects. The de-registration logic is currently disabled
 * (commented out below); cleanup happens elsewhere via
 * clear_client_for_stat() when a send to the client fails.
 * NOTE(review): confirm stale registrations cannot accumulate while
 * this body stays commented out. */
2885 stats_memclnt_delete_callback (u32 client_index)
2887 vpe_client_stats_registration_t *rp;
2888 stats_main_t *sm = &stats_main;
2892 /* p = hash_get (sm->stats_registration_hash, client_index); */
2895 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2896 /* pool_put (sm->stats_registrations, rp); */
2897 /* hash_unset (sm->stats_registration_hash, client_index); */
/* The counter messages below are assembled directly by the stats
 * thread (already in network byte order) and are handed to clients
 * as-is, so the generated endian-swap and print handlers are
 * suppressed by mapping them to vl_noop_handler. */
2903 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
2904 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
2905 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
2906 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
2907 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
2908 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
2909 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
2910 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
2911 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
2912 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
2913 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
2914 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
2916 static clib_error_t *
2917 stats_init (vlib_main_t * vm)
2919 stats_main_t *sm = &stats_main;
2920 api_main_t *am = &api_main;
2921 void *vlib_worker_thread_bootstrap_fn (void *arg);
2924 sm->vnet_main = vnet_get_main ();
2925 sm->interface_main = &vnet_get_main ()->interface_main;
2927 sm->stats_poll_interval_in_seconds = 10;
2928 sm->data_structure_lock =
2929 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
2930 CLIB_CACHE_LINE_BYTES);
2931 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
2934 vl_msg_api_set_handlers(VL_API_##N, #n, \
2935 vl_api_##n##_t_handler, \
2937 vl_api_##n##_t_endian, \
2938 vl_api_##n##_t_print, \
2939 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
2943 /* tell the msg infra not to free these messages... */
2944 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
2945 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
2946 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
2947 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
2948 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
2949 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
2952 * Set up the (msg_name, crc, message-id) table
2954 setup_message_id_table (am);
2956 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
2957 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
2958 #define stats_reg(n) \
2959 sm->stats_registrations[IDX_##n] = 0; \
2960 sm->stats_registration_hash[IDX_##n] = 0;
2961 #include <vpp/stats/stats.reg>
/* Run stats_init during vlib start-up */
2967 VLIB_INIT_FUNCTION (stats_init);
/* Spawn a dedicated stats pthread running stats_thread_fn; the
 * per-worker data structures are not cloned for it since it is not a
 * graph-dispatch worker. */
2970 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
2972 .function = stats_thread_fn,
2975 .no_data_structure_clone = 1,
2981 * fd.io coding-style-patch-verification: ON
2984 * eval: (c-set-style "gnu")