2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
25 stats_main_t stats_main;
27 #include <vnet/ip/ip.h>
29 #include <vpp/api/vpe_msg_enum.h>
32 #define f64_print(a,b)
34 #define vl_typedefs /* define message structures */
35 #include <vpp/api/vpe_all_api_h.h>
38 #define vl_endianfun /* define message structures */
39 #include <vpp/api/vpe_all_api_h.h>
42 /* instantiate all the print functions we know about */
43 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
45 #include <vpp/api/vpe_all_api_h.h>
/* X-macro table: one (API-message-ID suffix, handler suffix) pair per
 * stats-related binary-API message this module handles.  Expanded
 * elsewhere with a per-entry definition of _() to register handlers. */
48 #define foreach_stats_msg \
49 _(WANT_STATS, want_stats) \
50 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
51 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
52 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
53 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
54 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
55 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
56 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
57 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
58 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
59 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
60 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
61 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
62 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
63 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
64 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
65 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
66 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
67 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
68 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)
/* Pull in the generated message-name/CRC list for this API, then register
 * each "name_crc" string with the API message table so clients can look
 * up message IDs by name+CRC.
 * NOTE(review): this extract is missing lines (return type, braces and the
 * per-entry _() definition) relative to the full source. */
70 #define vl_msg_name_crc_list
71 #include <vpp/stats/stats.api.h>
72 #undef vl_msg_name_crc_list
75 setup_message_id_table (api_main_t * am)
78 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
79 foreach_vl_msg_name_crc_stats;
55 /* These constants ensure msg sizes <= 1024, aka ring allocation */
56 #define SIMPLE_COUNTER_BATCH_SIZE 126
57 #define COMBINED_COUNTER_BATCH_SIZE 63
58 #define IP4_FIB_COUNTER_BATCH_SIZE 48
59 #define IP6_FIB_COUNTER_BATCH_SIZE 30
60 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
61 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
62 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
/* Pause (in nanoseconds, 5 ms) between batches when the shared-memory
 * ring is full, to give the consumer a chance to drain it. */
63 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* format() callback: render a VNET_INTERFACE_COMBINED_COUNTERS message as
 * "ifname.counter.packets N" / "ifname.counter.bytes N" lines, one pair
 * per interface starting at first_sw_if_index.  Counter values arrive in
 * network byte order and possibly unaligned, hence the
 * clib_mem_unaligned + clib_net_to_host_u64 pairs below. */
64 format_vnet_interface_combined_counters (u8 * s, va_list * args)
99 stats_main_t *sm = &stats_main;
100 vl_api_vnet_interface_combined_counters_t *mp =
101 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
104 u32 count, sw_if_index;
106 count = ntohl (mp->count);
107 sw_if_index = ntohl (mp->first_sw_if_index);
111 vp = (vlib_counter_t *) mp->data;
/* Map the counter type to a human-readable name; "bogus" for unknowns. */
113 switch (mp->vnet_counter_type)
115 case VNET_INTERFACE_COUNTER_RX:
118 case VNET_INTERFACE_COUNTER_TX:
122 counter_name = "bogus";
125 for (i = 0; i < count; i++)
127 packets = clib_mem_unaligned (&vp->packets, u64);
128 packets = clib_net_to_host_u64 (packets);
129 bytes = clib_mem_unaligned (&vp->bytes, u64);
130 bytes = clib_net_to_host_u64 (bytes);
132 s = format (s, "%U.%s.packets %lld\n",
133 format_vnet_sw_if_index_name,
134 sm->vnet_main, sw_if_index, counter_name, packets);
135 s = format (s, "%U.%s.bytes %lld\n",
136 format_vnet_sw_if_index_name,
137 sm->vnet_main, sw_if_index, counter_name, bytes);
/* format() callback: render a VNET_INTERFACE_SIMPLE_COUNTERS message as
 * one "ifname.counter N" line per interface, starting at
 * first_sw_if_index.  Values are u64 in network byte order, read
 * unaligned from the message payload. */
87 format_vnet_interface_simple_counters (u8 * s, va_list * args)
146 stats_main_t *sm = &stats_main;
147 vl_api_vnet_interface_simple_counters_t *mp =
148 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
150 u32 count, sw_if_index;
151 count = ntohl (mp->count);
152 sw_if_index = ntohl (mp->first_sw_if_index);
154 vp = (u64 *) mp->data;
/* Translate the simple-counter type enum into a display name. */
157 switch (mp->vnet_counter_type)
159 case VNET_INTERFACE_COUNTER_DROP:
160 counter_name = "drop";
162 case VNET_INTERFACE_COUNTER_PUNT:
163 counter_name = "punt";
165 case VNET_INTERFACE_COUNTER_IP4:
166 counter_name = "ip4";
168 case VNET_INTERFACE_COUNTER_IP6:
169 counter_name = "ip6";
171 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
172 counter_name = "rx-no-buff";
174 case VNET_INTERFACE_COUNTER_RX_MISS:
175 counter_name = "rx-miss";
177 case VNET_INTERFACE_COUNTER_RX_ERROR:
178 counter_name = "rx-error (fifo-full)";
180 case VNET_INTERFACE_COUNTER_TX_ERROR:
181 counter_name = "tx-error (fifo-full)";
184 counter_name = "bogus";
187 for (i = 0; i < count; i++)
189 v = clib_mem_unaligned (vp, u64);
190 v = clib_net_to_host_u64 (v);
192 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
193 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the stats data-structure spinlock.  Recursive on the same
 * thread: if this thread already holds it, the early-out below avoids
 * self-deadlock.  Spins with __sync_lock_test_and_set until acquired,
 * then records the owning thread.  `release_hint`/`tag` appear to be
 * diagnostic — TODO confirm against the full source. */
118 dslock (stats_main_t * sm, int release_hint, int tag)
204 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock not yet allocated — nothing to take. */
206 if (PREDICT_FALSE (l == 0))
209 thread_index = vlib_get_thread_index ();
/* Already held by this thread: treat as recursive acquisition. */
210 if (l->lock && l->thread_index == thread_index)
219 while (__sync_lock_test_and_set (&l->lock, 1))
222 l->thread_index = thread_index;
/* Public wrapper: lock the singleton stats_main data-structure lock. */
125 stats_dslock_with_hint (int hint, int tag)
229 stats_main_t *sm = &stats_main;
230 dslock (sm, hint, tag);
/* Release the stats data-structure lock; asserts the caller is the
 * owning thread, and issues a full memory barrier before release. */
128 dsunlock (stats_main_t * sm)
237 data_structure_lock_t *l = sm->data_structure_lock;
239 if (PREDICT_FALSE (l == 0))
242 thread_index = vlib_get_thread_index ();
243 ASSERT (l->lock && l->thread_index == thread_index);
249 CLIB_MEMORY_BARRIER ();
/* Public wrapper: unlock the singleton stats_main data-structure lock. */
134 stats_dsunlock (int hint, int tag)
257 stats_main_t *sm = &stats_main;
/* Look up the registration entry for (reg, item, client_index).
 * Two-level lookup: item -> registration pool index, then
 * client_index -> client pool index.  Returns a pointer into the
 * registration's client pool (NULL-return paths are in lines missing
 * from this extract). */
136 static vpe_client_registration_t *
137 get_client_for_stat (u32 reg, u32 item, u32 client_index)
264 stats_main_t *sm = &stats_main;
265 vpe_client_stats_registration_t *registration;
268 /* Is there anything listening for item in that reg */
269 p = hash_get (sm->stats_registration_hash[reg], item);
274 /* If there is, is our client_index one of them */
275 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
276 p = hash_get (registration->client_hash, client_index);
281 return pool_elt_at_index (registration->clients, p[0]);
/* Register `client` as a listener for (reg, item).  Creates the
 * per-item registration (and its client hash/pool) on first use, then
 * adds the client to it.  Returns 1: at least one client is now
 * registered, so the poller should run. */
146 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
288 stats_main_t *sm = &stats_main;
289 vpe_client_stats_registration_t *registration;
290 vpe_client_registration_t *cr;
293 /* Is there anything listening for item in that reg */
294 p = hash_get (sm->stats_registration_hash[reg], item);
/* No registration for this item yet — create one with empty client set. */
298 pool_get (sm->stats_registrations[reg], registration);
299 registration->item = item;
300 registration->client_hash = NULL;
301 registration->clients = NULL;
302 hash_set (sm->stats_registration_hash[reg], item,
303 registration - sm->stats_registrations[reg]);
307 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* Add the client unless it is already present in the client hash. */
310 p = hash_get (registration->client_hash, client->client_index);
314 pool_get (registration->clients, cr);
315 cr->client_index = client->client_index;
316 cr->client_pid = client->client_pid;
317 hash_set (registration->client_hash, cr->client_index,
318 cr - registration->clients);
321 return 1; //At least one client is doing something ... poll
/* Remove one client from the registration at `reg_index` for (reg, item).
 * If that was the item's last client, tear down the whole registration:
 * unhash the item, free the client hash/pool, and return the
 * registration to its pool. */
166 clear_one_client (u32 reg_index, u32 reg, u32 item, u32 client_index)
327 stats_main_t *sm = &stats_main;
328 vpe_client_stats_registration_t *registration;
329 vpe_client_registration_t *client;
332 registration = pool_elt_at_index (sm->stats_registrations[reg], reg_index);
333 p = hash_get (registration->client_hash, client_index);
337 client = pool_elt_at_index (registration->clients, p[0]);
338 hash_unset (registration->client_hash, client->client_index);
339 pool_put (registration->clients, client);
341 /* Now check if that was the last client for that item */
342 if (0 == pool_elts (registration->clients))
344 hash_unset (sm->stats_registration_hash[reg], item);
345 hash_free (registration->client_hash);
346 pool_free (registration->clients);
347 pool_put (sm->stats_registrations[reg], registration);
/* Unregister `client_index` from (reg, item) if registered, then count
 * the registrations remaining across all stat types — presumably the
 * return value (in lines missing here) tells the caller whether the
 * poller can be disabled; TODO confirm against the full source. */
181 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
355 stats_main_t *sm = &stats_main;
359 /* Clear the client first */
360 /* Is there anything listening for item in that reg */
361 p = hash_get (sm->stats_registration_hash[reg], item);
366 /* If there is, is our client_index one of them */
367 clear_one_client (p[0], reg, item, client_index);
371 /* Now check if that was the last item in any of the listened to stats */
372 for (i = 0; i < STATS_REG_N_IDX; i++)
374 elts += pool_elts (sm->stats_registrations[i]);
/* Remove `client_index` from every stat registration of every type
 * (used when a client disconnects), then count surviving registrations
 * so the caller can decide whether polling is still needed. */
191 clear_client_for_all_stats (u32 client_index)
382 stats_main_t *sm = &stats_main;
383 u32 reg_index, item, reg;
387 vec_foreach_index(reg, sm->stats_registration_hash)
389 hash_foreach(item, reg_index, sm->stats_registration_hash[reg],
391 clear_one_client(reg_index, reg, item, client_index);
397 /* Now check if that was the last item in any of the listened to stats */
398 for (i = 0; i < STATS_REG_N_IDX; i++)
400 elts += pool_elts (sm->stats_registrations[i]);
/* API-client reaper: when a client disconnects, drop all of its stat
 * registrations and keep the poller enabled only if other clients
 * remain.  Registered via VL_MSG_API_REAPER_FUNCTION below. */
200 static clib_error_t *
201 want_stats_reaper (u32 client_index)
408 stats_main_t *sm = &stats_main;
410 sm->enable_poller = clear_client_for_all_stats (client_index);
415 VL_MSG_API_REAPER_FUNCTION (want_stats_reaper);
205 * Return a copy of the clients list.
/* Build and return a freshly-allocated vector copy of every client
 * registered for (reg, item).  Caller owns (and must vec_free) the
 * returned vector; returns an empty/NULL vector when nothing listens. */
206 vpe_client_registration_t *
207 get_clients_for_stat (u32 reg, u32 item)
424 stats_main_t *sm = &stats_main;
425 vpe_client_registration_t *client, *clients = 0;
426 vpe_client_stats_registration_t *registration;
429 /* Is there anything listening for item in that reg */
430 p = hash_get (sm->stats_registration_hash[reg], item);
435 /* If there is, is our client_index one of them */
436 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
438 vec_reset_length (clients);
/* Snapshot each registered client into the result vector. */
441 pool_foreach (client, registration->clients,
443 vec_add1 (clients, *client);}
/* Free the per-stat-type index vectors and the outer registrations
 * vector itself (one stats_reg() expansion per type in stats.reg). */
218 clear_client_reg (u32 ** registrations)
453 /* When registrations[x] is a vector of pool indices
454 here is a good place to clean up the pools
456 #define stats_reg(n) vec_free(registrations[IDX_##n]);
457 #include <vpp/stats/stats.reg>
460 vec_free (registrations);
/* Allocate/reset one "interested indexes" vector per registerable stat
 * type and return the (possibly reallocated) registrations vector. */
224 init_client_reg (u32 ** registrations)
468 Initialise the stats registrations for each
469 type of stat a client can register for as well as
470 a vector of "interested" indexes.
471 Initially this is a u32 of either sw_if_index or fib_index
472 but eventually this should migrate to a pool_index (u32)
473 with a type specific pool that can include more complex things
474 such as timing and structured events.
476 vec_validate (registrations, STATS_REG_N_IDX);
477 #define stats_reg(n) \
478 vec_reset_length(registrations[IDX_##n]);
479 #include <vpp/stats/stats.reg>
483 When registrations[x] is a vector of pool indices, here
484 is a good place to init the pools.
486 return registrations;
/* Mark every stat type as "all items wanted" by appending the ~0
 * wildcard to each type's index vector. */
239 enable_all_client_reg (u32 ** registrations)
494 Enable all stats known by adding
495 ~0 to the index vector. Eventually this
496 should be deprecated.
498 #define stats_reg(n) \
499 vec_add1(registrations[IDX_##n], ~0);
500 #include <vpp/stats/stats.reg>
502 return registrations;
/* Poller: batch every per-interface simple counter into
 * VNET_INTERFACE_SIMPLE_COUNTERS messages (at most
 * SIMPLE_COUNTER_BATCH_SIZE entries each, keeping messages <= 1024
 * bytes) and enqueue them to the main-thread shared-memory input queue.
 * Holds the interface counter lock for the whole sweep so the counter
 * vectors cannot move underneath us. */
247 do_simple_interface_counters (stats_main_t * sm)
508 vl_api_vnet_interface_simple_counters_t *mp = 0;
509 vnet_interface_main_t *im = sm->interface_main;
510 api_main_t *am = sm->api_main;
511 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
512 svm_queue_t *q = shmem_hdr->vl_input_queue;
513 vlib_simple_counter_main_t *cm;
514 u32 items_this_message = 0;
519 * Prevent interface registration from expanding / moving the vectors...
520 * That tends never to happen, so we can hold this lock for a while.
522 vnet_interface_counter_lock (im);
524 vec_foreach (cm, im->sw_if_counters)
526 n_counts = vlib_simple_counter_n_counters (cm);
527 for (i = 0; i < n_counts; i++)
/* Start a new message: allocate payload for this batch and stamp the
 * message id, counter type and first interface index. */
531 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
534 mp = vl_msg_api_alloc_as_if_client
535 (sizeof (*mp) + items_this_message * sizeof (v));
536 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
537 mp->vnet_counter_type = cm - im->sw_if_counters;
538 mp->first_sw_if_index = htonl (i);
540 vp = (u64 *) mp->data;
/* Append this interface's value in network byte order (unaligned). */
542 v = vlib_get_simple_counter (cm, i);
543 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
/* Batch full: finalize the count and ship it to the main thread. */
546 if (mp->count == items_this_message)
548 mp->count = htonl (items_this_message);
549 /* Send to the main thread... */
550 vl_msg_api_send_shmem (q, (u8 *) & mp);
556 vnet_interface_counter_unlock (im);
/* Enable or disable `client`'s registration for (stat, item).
 * Disable: warn (and return) if not registered, else clear it.
 * Enable: record the client's index/pid and update the global
 * enable_poller flag from set_client_for_stat(). */
275 handle_client_registration (vpe_client_registration_t * client, u32 stat,
276 u32 item, int enable_disable)
563 stats_main_t *sm = &stats_main;
564 vpe_client_registration_t *rp, _rp;
566 rp = get_client_for_stat (stat, item, client->client_index);
569 if (enable_disable == 0)
571 if (!rp) // No client to disable
573 clib_warning ("pid %d: already disabled for stats...",
578 clear_client_for_stat (stat, item, client->client_index);
585 rp->client_index = client->client_index;
586 rp->client_pid = client->client_pid;
587 sm->enable_poller = set_client_for_stat (stat, item, rp);
287 /**********************************
288 * ALL Interface Combined stats - to be deprecated
289 **********************************/
290 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/* Handler for WANT_INTERFACE_COMBINED_STATS: register the caller for
 * combined counters on ALL interfaces by re-using the per-interface
 * mechanism with the ~0 wildcard, then send a reply.  If the client
 * registration has already vanished, its stat registration is cleared
 * instead of replying. */
291 vl_api_want_interface_combined_stats_t_handler
292 (vl_api_want_interface_combined_stats_t * mp)
603 stats_main_t *sm = &stats_main;
604 vpe_client_registration_t rp;
605 vl_api_want_interface_combined_stats_reply_t *rmp;
608 vl_api_registration_t *reg;
611 swif = ~0; //Using same mechanism as _per_interface_
612 rp.client_index = mp->client_index;
613 rp.client_pid = mp->pid;
615 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
619 reg = vl_api_client_index_to_registration (mp->client_index);
/* Client gone: undo the registration made above. */
623 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
628 rmp = vl_msg_api_alloc (sizeof (*rmp));
629 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
630 rmp->context = mp->context;
631 rmp->retval = retval;
633 vl_api_send_msg (reg, (u8 *) rmp);
/* Distribution handler: fan a VNET_INTERFACE_COMBINED_COUNTERS message
 * out to every registered client.  Uses the "previous registration"
 * trick: the original message buffer is handed to the last client and a
 * copy is made for each additional one, so only (n-1) copies are
 * allocated.  Frees the message if no client could take it. */
308 vl_api_vnet_interface_combined_counters_t_handler
309 (vl_api_vnet_interface_combined_counters_t * mp)
640 vpe_client_registration_t *clients, client;
641 stats_main_t *sm = &stats_main;
642 vl_api_registration_t *reg, *reg_prev = NULL;
643 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
647 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
650 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
651 ~0 /*flag for all */ );
653 for (i = 0; i < vec_len (clients); i++)
656 reg = vl_api_client_index_to_registration (client.client_index);
/* Send the current buffer to the previous client and continue with a
 * fresh copy, deferring the final send to the loop exit below. */
659 if (reg_prev && vl_api_can_send_msg (reg_prev))
661 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
662 clib_memcpy (mp_copy, mp, mp_size);
663 vl_api_send_msg (reg_prev, (u8 *) mp);
671 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
674 if (reg_prev && vl_api_can_send_msg (reg_prev))
676 vl_api_send_msg (reg_prev, (u8 *) mp);
680 vl_msg_api_free (mp);
/* Poller: batch every per-interface combined (packets+bytes) counter
 * into VNET_INTERFACE_COMBINED_COUNTERS messages (at most
 * COMBINED_COUNTER_BATCH_SIZE entries per message) and enqueue them to
 * the main-thread shared-memory input queue.  Mirrors
 * do_simple_interface_counters() but for vlib_counter_t pairs. */
327 do_combined_interface_counters (stats_main_t * sm)
687 vl_api_vnet_interface_combined_counters_t *mp = 0;
688 vnet_interface_main_t *im = sm->interface_main;
689 api_main_t *am = sm->api_main;
690 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
691 svm_queue_t *q = shmem_hdr->vl_input_queue;
692 vlib_combined_counter_main_t *cm;
693 u32 items_this_message = 0;
694 vlib_counter_t v, *vp = 0;
697 vnet_interface_counter_lock (im);
699 vec_foreach (cm, im->combined_sw_if_counters)
701 n_counts = vlib_combined_counter_n_counters (cm);
702 for (i = 0; i < n_counts; i++)
/* Start a new batch message for this counter type / start index. */
706 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
709 mp = vl_msg_api_alloc_as_if_client
710 (sizeof (*mp) + items_this_message * sizeof (v));
711 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
712 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
713 mp->first_sw_if_index = htonl (i);
715 vp = (vlib_counter_t *) mp->data;
/* Store packets/bytes in network byte order, unaligned-safe. */
717 vlib_get_combined_counter (cm, i, &v);
718 clib_mem_unaligned (&vp->packets, u64)
719 = clib_host_to_net_u64 (v.packets);
720 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
723 if (mp->count == items_this_message)
725 mp->count = htonl (items_this_message);
726 /* Send to the main thread... */
727 vl_msg_api_send_shmem (q, (u8 *) & mp);
733 vnet_interface_counter_unlock (im);
356 /**********************************
357 * Per Interface Combined stats
358 **********************************/
359 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_COMBINED_STATS: validate every
 * requested sw_if_index, register (or deregister) the client for each,
 * and reply with the overall retval.  On a vanished client, roll back
 * the registrations instead of replying. */
360 vl_api_want_per_interface_combined_stats_t_handler
361 (vl_api_want_per_interface_combined_stats_t * mp)
745 stats_main_t *sm = &stats_main;
746 vpe_client_registration_t rp;
747 vl_api_want_per_interface_combined_stats_reply_t *rmp;
748 vlib_combined_counter_main_t *cm;
751 vl_api_registration_t *reg;
752 u32 i, swif, num = 0;
754 num = ntohl (mp->num);
369 * Validate sw_if_indexes before registering
759 for (i = 0; i < num; i++)
761 swif = ntohl (mp->sw_ifs[i]);
372 * Check its a real sw_if_index that the client is allowed to see
768 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
770 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indexes valid: apply the enable/disable for each interface. */
776 for (i = 0; i < num; i++)
778 swif = ntohl (mp->sw_ifs[i]);
780 rp.client_index = mp->client_index;
781 rp.client_pid = mp->pid;
782 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
783 swif, ntohl (mp->enable_disable));
787 reg = vl_api_client_index_to_registration (mp->client_index);
/* Client disconnected mid-request: clear what we just registered. */
790 for (i = 0; i < num; i++)
792 swif = ntohl (mp->sw_ifs[i]);
795 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
801 rmp = vl_msg_api_alloc (sizeof (*rmp));
802 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
803 rmp->context = mp->context;
804 rmp->retval = retval;
806 vl_api_send_msg (reg, (u8 *) rmp);
390 /* Per Interface Combined distribution to client */
/* Poller: for every per-interface combined-counter registration, build
 * one VNET_PER_INTERFACE_COMBINED_COUNTERS message per registered
 * client and send it directly to that client's queue.  The ~0 wildcard
 * item delegates to the all-interface path.  Registrations and clients
 * are snapshotted into regs_tmp/clients_tmp so the pools can be
 * modified (e.g. dead-client cleanup) while iterating. */
391 do_combined_per_interface_counters (stats_main_t * sm)
813 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
814 vnet_interface_main_t *im = sm->interface_main;
815 api_main_t *am = sm->api_main;
816 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
817 vl_api_registration_t *vl_reg;
818 vlib_combined_counter_main_t *cm;
819 vl_api_vnet_combined_counter_t *vp = 0;
822 vpe_client_stats_registration_t *reg;
823 vpe_client_registration_t *client;
824 u32 *sw_if_index = 0;
826 vnet_interface_counter_lock (im);
828 vec_reset_length (sm->regs_tmp);
/* Snapshot all registrations for this stat type. */
832 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
833 ({ vec_add1 (sm->regs_tmp, reg); }));
836 for (i = 0; i < vec_len (sm->regs_tmp); i++)
838 reg = sm->regs_tmp[i];
/* Wildcard registration: drop the lock and run the ALL-interface
 * batch sender instead of a per-client message. */
841 vnet_interface_counter_unlock (im);
842 do_combined_interface_counters (sm);
843 vnet_interface_counter_lock (im);
846 vec_reset_length (sm->clients_tmp);
/* Snapshot this registration's clients before sending. */
849 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
853 for (j = 0; j < vec_len (sm->clients_tmp); j++)
855 client = sm->clients_tmp[j];
857 vl_reg = vl_api_client_index_to_registration (client->client_index);
859 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
863 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
864 reg->item, client->client_index);
867 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
868 memset (mp, 0, sizeof (*mp));
421 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
422 * count will eventually be used to optimise the batching
423 * of per client messages for each stat. For now setting this to 1 then
424 * iterate. This will not affect API.
425 * FIXME instead of enqueueing here, this should be sent to a batch
426 * storer for per-client transmission. Each "mp" sent would be a single entry
427 * and if a client is listening to other sw_if_indexes for same, it would be
428 * appended to that *mp
429 * - capturing the timestamp of the counters "when VPP knew them" is important.
430 * Less so is that the timing of the delivery to the control plane be in the same
431 * i.e. As long as the control plane can delta messages from VPP and work out
432 * velocity etc based on the timestamp, it can do so in a more "batch mode".
433 * It would be beneficial to keep a "per-client" message queue, and then
434 * batch all the stat messages for a client into one message, with
435 * discrete timestamps.
436 * Given this particular API is for "per interface" one assumes that the scale
437 * is less than the ~0 case, which the prior API is suited for.
438 * 1 message per api call for now
439 mp->count = htonl (1);
440 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
441 vp = (vl_api_vnet_combined_counter_t *) mp->data;
442 vp->sw_if_index = htonl (reg->item);
443 im = &vnet_get_main ()->interface_main;
/* Macro filled per direction below: fetch the combined counter for
 * this interface and store packets/bytes network-ordered, unaligned. */
444 cm = im->combined_sw_if_counters + X; \
445 vlib_get_combined_counter (cm, reg->item, &v); \
446 clib_mem_unaligned (&vp->x##_packets, u64) = \
447 clib_host_to_net_u64 (v.packets); \
448 clib_mem_unaligned (&vp->x##_bytes, u64) = \
449 clib_host_to_net_u64 (v.bytes);
450 _(VNET_INTERFACE_COUNTER_RX, rx);
451 _(VNET_INTERFACE_COUNTER_TX, tx);
452 _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
453 _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
454 _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
455 _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
456 _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
457 _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
458 vl_api_send_msg (vl_reg, (u8 *) mp);
459 vnet_interface_counter_unlock (im);
460 /**********************************
461 * Per Interface simple stats
462 **********************************/
463 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_SIMPLE_STATS: validate every requested
 * sw_if_index, register (or deregister) the client for each, and reply.
 * Structure mirrors the combined-stats variant above. */
464 vl_api_want_per_interface_simple_stats_t_handler
465 (vl_api_want_per_interface_simple_stats_t * mp)
947 stats_main_t *sm = &stats_main;
948 vpe_client_registration_t rp;
949 vl_api_want_per_interface_simple_stats_reply_t *rmp;
950 vlib_simple_counter_main_t *cm;
953 vl_api_registration_t *reg;
954 u32 i, swif, num = 0;
956 num = ntohl (mp->num);
958 for (i = 0; i < num; i++)
960 swif = ntohl (mp->sw_ifs[i]);
475 /* Check its a real sw_if_index that the client is allowed to see */
965 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
967 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indexes valid: apply enable/disable per interface. */
973 for (i = 0; i < num; i++)
975 swif = ntohl (mp->sw_ifs[i]);
977 rp.client_index = mp->client_index;
978 rp.client_pid = mp->pid;
979 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
980 swif, ntohl (mp->enable_disable));
984 reg = vl_api_client_index_to_registration (mp->client_index);
485 /* Client may have disconnected abruptly, clean up */
989 for (i = 0; i < num; i++)
991 swif = ntohl (mp->sw_ifs[i]);
993 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
1001 rmp = vl_msg_api_alloc (sizeof (*rmp));
1002 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
1003 rmp->context = mp->context;
1004 rmp->retval = retval;
1006 vl_api_send_msg (reg, (u8 *) rmp);
494 /* Per Interface Simple distribution to client */
/* Poller: for every per-interface simple-counter registration, build a
 * one-entry VNET_PER_INTERFACE_SIMPLE_COUNTERS message per registered
 * client (drop/punt/ip4/ip6/rx-no-buf/rx-miss/rx-error/tx-error/mpls)
 * and send it directly to that client.  ~0 wildcard delegates to the
 * all-interface batch path; dead clients are cleaned up in-loop. */
495 do_simple_per_interface_counters (stats_main_t * sm)
1013 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
1014 vnet_interface_main_t *im = sm->interface_main;
1015 api_main_t *am = sm->api_main;
1016 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1017 vl_api_registration_t *vl_reg;
1018 vlib_simple_counter_main_t *cm;
1020 vpe_client_stats_registration_t *reg;
1021 vpe_client_registration_t *client;
1022 u32 timestamp, count;
1023 vl_api_vnet_simple_counter_t *vp = 0;
1026 vnet_interface_counter_lock (im);
1028 vec_reset_length (sm->regs_tmp);
/* Snapshot all registrations for this stat type. */
1032 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
1033 ({ vec_add1 (sm->regs_tmp, reg); }));
1036 for (i = 0; i < vec_len (sm->regs_tmp); i++)
1038 reg = sm->regs_tmp[i];
/* Wildcard: hand off to the ALL-interface batch sender. */
512 if (reg->item == ~0)
1041 vnet_interface_counter_unlock (im);
1042 do_simple_interface_counters (sm);
1043 vnet_interface_counter_lock (im);
1046 vec_reset_length (sm->clients_tmp);
1049 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
1053 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1055 client = sm->clients_tmp[j];
1056 vl_reg = vl_api_client_index_to_registration (client->client_index);
521 /* Client may have disconnected abrubtly, clean up */
1062 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1063 reg->item, client->client_index);
1067 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1068 memset (mp, 0, sizeof (*mp));
1069 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
527 * count will eventually be used to optimise the batching
528 * of per client messages for each stat. For now setting this to 1 then
529 * iterate. This will not affect API.
530 * FIXME instead of enqueueing here, this should be sent to a batch
531 * storer for per-client transmission. Each "mp" sent would be a single entry
532 * and if a client is listening to other sw_if_indexes for same, it would be
533 * appended to that *mp
534 * - capturing the timestamp of the counters "when VPP knew them" is important.
535 * Less so is that the timing of the delivery to the control plane be in the same
536 * i.e. As long as the control plane can delta messages from VPP and work out
537 * velocity etc based on the timestamp, it can do so in a more "batch mode".
538 * It would be beneficial to keep a "per-client" message queue, and then
539 * batch all the stat messages for a client into one message, with
540 * discrete timestamps.
541 * Given this particular API is for "per interface" one assumes that the scale
542 * is less than the ~0 case, which the prior API is suited for.
543 * 1 message per api call for now
544 mp->count = htonl (1);
545 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
546 vp = (vl_api_vnet_simple_counter_t *) mp->data;
547 vp->sw_if_index = htonl (reg->item);
/* One field per counter type: read the value for this interface and
 * store it in network byte order, unaligned-safe. */
548 // VNET_INTERFACE_COUNTER_DROP
549 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
550 v = vlib_get_simple_counter (cm, reg->item);
551 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
552 // VNET_INTERFACE_COUNTER_PUNT
553 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
554 v = vlib_get_simple_counter (cm, reg->item);
555 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
556 // VNET_INTERFACE_COUNTER_IP4
557 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
558 v = vlib_get_simple_counter (cm, reg->item);
559 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
560 //VNET_INTERFACE_COUNTER_IP6
561 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
562 v = vlib_get_simple_counter (cm, reg->item);
563 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
564 //VNET_INTERFACE_COUNTER_RX_NO_BUF
565 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
566 v = vlib_get_simple_counter (cm, reg->item);
567 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
568 clib_host_to_net_u64 (v);
569 //VNET_INTERFACE_COUNTER_RX_MISS
570 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
571 v = vlib_get_simple_counter (cm, reg->item);
572 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
573 //VNET_INTERFACE_COUNTER_RX_ERROR
574 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
575 v = vlib_get_simple_counter (cm, reg->item);
576 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
577 //VNET_INTERFACE_COUNTER_TX_ERROR
578 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
579 v = vlib_get_simple_counter (cm, reg->item);
580 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
581 //VNET_INTERFACE_COUNTER_MPLS
582 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
583 v = vlib_get_simple_counter (cm, reg->item);
584 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
585 vl_api_send_msg (vl_reg, (u8 *) mp);
586 vnet_interface_counter_unlock (im);
587 /**********************************
588 **********************************/
/* Sleep for sec seconds + nsec nanoseconds via nanosleep(), warning on
 * failure.  NOTE(review): this extract is missing the tv_sec assignment
 * and the EINTR-retry loop present in the full source — the req/rem
 * timespec pair exists so an interrupted sleep can be resumed with the
 * remaining time. */
589 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1167 struct timespec _req, *req = &_req;
1168 struct timespec _rem, *rem = &_rem;
1171 req->tv_nsec = nsec;
1174 if (nanosleep (req, rem) == 0)
1179 clib_unix_warning ("nanosleep");
595 * @brief The context passed when collecting adjacency counters
596 typedef struct ip4_nbr_stats_ctx_t_
597 * The SW IF index all these adjs belong to
598 * A vector of ip4 nbr counters
599 vl_api_ip4_nbr_counter_t *counters;
600 } ip4_nbr_stats_ctx_t;
/* adj_nbr_walk() callback: if the adjacency has a non-zero packet
 * count, append one counter record (packets/bytes network-ordered,
 * next-hop IPv4 address, link type) to the context's vector.  Always
 * continues the walk. */
601 static adj_walk_rc_t
602 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1203 vl_api_ip4_nbr_counter_t *vl_counter;
1204 vlib_counter_t adj_counter;
1205 ip4_nbr_stats_ctx_t *ctx;
1206 ip_adjacency_t *adj;
1209 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies — only ship counters that have traffic. */
608 if (0 != adj_counter.packets)
1213 vec_add2 (ctx->counters, vl_counter, 1);
1216 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1217 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1218 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1219 vl_counter->link_type = adj->ia_link;
1221 return (ADJ_WALK_RC_CONTINUE);
/* Minimum of two scalars.
 * WARNING: macro evaluates each argument twice — do not pass
 * expressions with side effects.  Guarded with #ifndef because this
 * file defines MIN in more than one place. */
#ifndef MIN
#define MIN(x,y) (((x)<(y))?(x):(y))
#endif
/* Drain the accumulated IPv4 neighbour counters from ctx into
 * VNET_IP4_NBR_COUNTERS messages of at most IP4_FIB_COUNTER_BATCH_SIZE
 * entries each, sent to the main-thread shared-memory queue.  Counters
 * are copied from the BACK of the vector so "consumed" entries can be
 * erased by shrinking the vector length.  If the queue is full, back
 * off for STATS_RELEASE_DELAY_NS between batches. */
616 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1229 api_main_t *am = sm->api_main;
1230 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1231 svm_queue_t *q = shmem_hdr->vl_input_queue;
1232 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
621 * If the walk context has counters, which may be left over from the last
622 * suspend, then we continue from there.
623 while (0 != vec_len (ctx->counters))
1241 u32 n_items = MIN (vec_len (ctx->counters),
1242 IP4_FIB_COUNTER_BATCH_SIZE);
/* Hold the data-structure lock while allocating and filling the
 * message from the counter vector. */
1245 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1247 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1250 (vl_api_ip4_nbr_counter_t)));
1251 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1252 mp->count = ntohl (n_items);
1253 mp->sw_if_index = ntohl (ctx->sw_if_index);
632 * copy the counters from the back of the context, then we can easily
633 * 'erase' them by resetting the vector length.
634 * The order we push the stats to the caller is not important.
1263 &ctx->counters[vec_len (ctx->counters) - n_items],
1264 n_items * sizeof (*ctx->counters));
1266 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Note queue fullness before sending; pause afterwards if it was full. */
638 pause = svm_queue_is_full (q);
1274 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1275 svm_queue_unlock (q);
1279 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1280 STATS_RELEASE_DELAY_NS);
/* Poller: for each software interface, walk its IPv4 neighbour
 * adjacencies under the data-structure lock, collecting non-zero
 * counters into a per-interface context, then ship them in batches via
 * ip4_nbr_ship().  The lock makes the walk safe against concurrent
 * routing updates. */
643 do_ip4_nbr_counters (stats_main_t * sm)
1287 vnet_main_t *vnm = vnet_get_main ();
1288 vnet_interface_main_t *im = &vnm->interface_main;
1289 vnet_sw_interface_t *si;
1291 ip4_nbr_stats_ctx_t ctx = {
1297 pool_foreach (si, im->sw_interfaces,
649 * update the interface we are now concerned with
1302 ctx.sw_if_index = si->sw_if_index;
651 * we are about to walk another interface, so we shouldn't have any pending
1308 ASSERT(ctx.counters == NULL);
653 * visit each neighbour adjacency on the interface and collect
654 * its current stats.
655 * Because we hold the lock the walk is synchronous, so safe to routing
656 * updates. It's limited in work by the number of adjacenies on an
657 * interface, which is typically not huge.
1317 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1318 adj_nbr_walk (si->sw_if_index,
660 * if this interface has some adjacencies with counters then ship them,
661 * else continue to the next interface.
662 if (NULL != ctx.counters)
1330 ip4_nbr_ship(sm, &ctx);
1337 * @brief The context passed when collecting adjacency counters
/* IPv6 twin of ip4_nbr_stats_ctx_t: groups one interface's nbr counters */
1339 typedef struct ip6_nbr_stats_ctx_t_
1342 * The SW IF index all these adjs belong to
1347 * A vector of ip6 nbr counters
1349 vl_api_ip6_nbr_counter_t *counters;
1350 } ip6_nbr_stats_ctx_t;
/**
 * @brief adj_nbr_walk callback: append one IPv6 adjacency's combined
 * counter to the walk context, skipping adjacencies with zero packets.
 * Always continues the walk.
 */
1352 static adj_walk_rc_t
1353 ip6_nbr_stats_cb (adj_index_t ai,
1356 vl_api_ip6_nbr_counter_t *vl_counter;
1357 vlib_counter_t adj_counter;
1358 ip6_nbr_stats_ctx_t *ctx;
1359 ip_adjacency_t *adj;
1362 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* only report adjacencies that have actually seen traffic */
1364 if (0 != adj_counter.packets)
1366 vec_add2(ctx->counters, vl_counter, 1);
1369 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1370 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
/* next-hop address copied raw — presumably already in network order; verify */
1371 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1372 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1373 vl_counter->link_type = adj->ia_link;
1375 return (ADJ_WALK_RC_CONTINUE);
1378 #define MIN(x,y) (((x)<(y))?(x):(y))
/**
 * @brief Ship collected IPv6 neighbour counters to API clients in batches
 * of at most IP6_FIB_COUNTER_BATCH_SIZE.  Counters are consumed from the
 * back of ctx->counters; the loop resumes where it left off after a suspend.
 */
1381 ip6_nbr_ship (stats_main_t * sm,
1382 ip6_nbr_stats_ctx_t *ctx)
1384 api_main_t *am = sm->api_main;
1385 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1386 svm_queue_t *q = shmem_hdr->vl_input_queue;
1387 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1391 * If the walk context has counters, which may be left over from the last
1392 * suspend, then we continue from there.
1394 while (0 != vec_len(ctx->counters))
1396 u32 n_items = MIN (vec_len (ctx->counters),
1397 IP6_FIB_COUNTER_BATCH_SIZE);
1400 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
/* variable-length message: header + n_items counter records */
1402 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1405 (vl_api_ip6_nbr_counter_t)));
1406 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1407 mp->count = ntohl (n_items);
1408 mp->sw_if_index = ntohl (ctx->sw_if_index);
1413 * copy the counters from the back of the context, then we can easily
1414 * 'erase' them by resetting the vector length.
1415 * The order we push the stats to the caller is not important.
1418 &ctx->counters[vec_len (ctx->counters) - n_items],
1419 n_items * sizeof (*ctx->counters));
1421 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* if the main thread's queue is full, pause after sending this batch */
1427 pause = svm_queue_is_full (q);
1429 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1430 svm_queue_unlock (q);
1434 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1435 STATS_RELEASE_DELAY_NS);
/**
 * @brief Collect and ship IPv6 neighbour (adjacency) counters for every
 * software interface.  Mirrors do_ip4_nbr_counters: lock, walk adjacencies
 * per interface, then ship any non-empty counter vector.
 */
1440 do_ip6_nbr_counters (stats_main_t * sm)
1442 vnet_main_t *vnm = vnet_get_main ();
1443 vnet_interface_main_t *im = &vnm->interface_main;
1444 vnet_sw_interface_t *si;
1446 ip6_nbr_stats_ctx_t ctx = {
1452 pool_foreach (si, im->sw_interfaces,
1455 * update the interface we are now concerned with
1457 ctx.sw_if_index = si->sw_if_index;
1460 * we are about to walk another interface, so we shouldn't have any pending
1463 ASSERT(ctx.counters == NULL);
1466 * visit each neighbour adjacency on the interface and collect
1467 * its current stats.
1468 * Because we hold the lock the walk is synchronous, so safe to routing
1469 * updates. It's limited in work by the number of adjacenies on an
1470 * interface, which is typically not huge.
1472 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1473 adj_nbr_walk (si->sw_if_index,
1480 * if this interface has some adjacencies with counters then ship them,
1481 * else continue to the next interface.
1483 if (NULL != ctx.counters)
1485 ip6_nbr_ship(sm, &ctx);
/**
 * @brief Scrape IPv4 FIB per-route counters and ship them to API clients.
 * Snapshots the FIB table pool, then for each table walks the dst-address
 * hash tables to build a route vector, reads each route's load-balance
 * counter, and batches non-zero entries into VNET_IP4_FIB_COUNTERS
 * messages of IP4_FIB_COUNTER_BATCH_SIZE.  The walk bails out and restarts
 * at the current table whenever the control plane signals release_hint.
 */
1492 do_ip4_fib_counters (stats_main_t * sm)
1494 ip4_main_t *im4 = &ip4_main;
1495 api_main_t *am = sm->api_main;
1496 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1497 svm_queue_t *q = shmem_hdr->vl_input_queue;
1501 do_ip46_fibs_t *do_fibs;
1502 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1503 u32 items_this_message;
1504 vl_api_ip4_fib_counter_t *ctrp = 0;
1505 u32 start_at_fib_index = 0;
1508 do_fibs = &sm->do_ip46_fibs;
/* snapshot the fib pool so a suspended walk survives table add/delete */
1511 vec_reset_length (do_fibs->fibs);
1513 pool_foreach (fib, im4->fibs,
1514 ({vec_add1(do_fibs->fibs,fib);}));
1518 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1520 fib = do_fibs->fibs[j];
1521 /* We may have bailed out due to control-plane activity */
1522 while ((fib - im4->fibs) < start_at_fib_index)
1525 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1529 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1530 mp = vl_msg_api_alloc_as_if_client
1532 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1533 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1535 mp->vrf_id = ntohl (fib->ft_table_id);
1536 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1540 /* happens if the last FIB was empty... */
1541 ASSERT (mp->count == 0);
1542 mp->vrf_id = ntohl (fib->ft_table_id);
1545 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1547 vec_reset_length (do_fibs->ip4routes);
1548 vec_reset_length (do_fibs->results);
/* gather every (prefix, fib-entry) pair, one mask length at a time */
1550 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1552 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1556 vec_reset_length (do_fibs->pvec);
1558 x.address_length = i;
1560 hash_foreach_pair (p, hash, (
1562 vec_add1 (do_fibs->pvec, p);}
1564 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1566 p = do_fibs->pvec[k];
1567 x.address.data_u32 = p->key;
1568 x.index = p->value[0];
1570 vec_add1 (do_fibs->ip4routes, x);
/* control plane wants the lock: remember where we were and yield */
1571 if (sm->data_structure_lock->release_hint)
1573 start_at_fib_index = fib - im4->fibs;
1575 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1576 STATS_RELEASE_DELAY_NS);
1578 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1584 vec_foreach (r, do_fibs->ip4routes)
1587 const dpo_id_t *dpo_id;
1590 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1591 index = (u32) dpo_id->dpoi_index;
1593 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1596 * If it has actually
1597 * seen at least one packet, send it.
1602 /* already in net byte order */
1603 ctrp->address = r->address.as_u32;
1604 ctrp->address_length = r->address_length;
1605 ctrp->packets = clib_host_to_net_u64 (c.packets);
1606 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: send it and start a fresh message */
1610 if (mp->count == items_this_message)
1612 mp->count = htonl (items_this_message);
1614 * If the main thread's input queue is stuffed,
1615 * drop the data structure lock (which the main thread
1616 * may want), and take a pause.
1619 if (svm_queue_is_full (q))
1622 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1623 svm_queue_unlock (q);
1625 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1626 STATS_RELEASE_DELAY_NS);
1629 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1630 svm_queue_unlock (q);
1632 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1633 mp = vl_msg_api_alloc_as_if_client
1635 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1636 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1638 mp->vrf_id = ntohl (fib->ft_table_id);
1639 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1641 } /* for each (mp or single) adj */
1642 if (sm->data_structure_lock->release_hint)
1644 start_at_fib_index = fib - im4->fibs;
1646 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1648 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1651 } /* vec_foreach (routes) */
1655 /* Flush any data from this fib */
1658 mp->count = htonl (mp->count);
1659 vl_msg_api_send_shmem (q, (u8 *) & mp);
1664 /* If e.g. the last FIB had no reportable routes, free the buffer */
1666 vl_msg_api_free (mp);
/**
 * @brief mfib_table_walk callback: record the entry's prefix so the
 * mroute can be re-looked-up later (the collector suspends mid-walk,
 * so indices cannot be trusted across the suspend).
 */
1670 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1672 stats_main_t *sm = ctx;
1673 do_ip46_fibs_t *do_fibs;
1674 mfib_entry_t *entry;
1676 do_fibs = &sm->do_ip46_fibs;
1677 entry = mfib_entry_get (fei);
1679 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/**
 * @brief Scrape IPv4 multicast-FIB counters and ship them to API clients.
 * Walks each mfib table under the data-structure lock to collect prefixes,
 * then re-looks-up each prefix (the walk may suspend) and batches non-zero
 * replicate counters into VNET_IP4_MFIB_COUNTERS messages.
 */
1685 do_ip4_mfib_counters (stats_main_t * sm)
1687 ip4_main_t *im4 = &ip4_main;
1688 api_main_t *am = sm->api_main;
1689 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1690 svm_queue_t *q = shmem_hdr->vl_input_queue;
1693 do_ip46_fibs_t *do_fibs;
1694 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1695 u32 items_this_message;
1696 vl_api_ip4_mfib_counter_t *ctrp = 0;
1697 u32 start_at_mfib_index = 0;
1700 do_fibs = &sm->do_ip46_fibs;
/* snapshot the mfib pool before walking */
1702 vec_reset_length (do_fibs->mfibs);
1704 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1707 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1709 mfib = do_fibs->mfibs[j];
1710 /* We may have bailed out due to control-plane activity */
1711 while ((mfib - im4->mfibs) < start_at_mfib_index)
1716 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1717 mp = vl_msg_api_alloc_as_if_client
1719 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1720 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1722 mp->vrf_id = ntohl (mfib->mft_table_id);
1723 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1727 /* happens if the last MFIB was empty... */
1728 ASSERT (mp->count == 0);
1729 mp->vrf_id = ntohl (mfib->mft_table_id);
1732 vec_reset_length (do_fibs->mroutes);
1735 * walk the table with table updates blocked
1737 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1739 mfib_table_walk (mfib->mft_index,
1740 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1743 vec_foreach (pfx, do_fibs->mroutes)
1745 const dpo_id_t *dpo_id;
1746 fib_node_index_t mfei;
1751 * re-lookup the entry, since we suspend during the collection
1753 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* entry may have been deleted while we were suspended */
1755 if (FIB_NODE_INDEX_INVALID == mfei)
1758 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1759 index = (u32) dpo_id->dpoi_index;
1761 vlib_get_combined_counter (&replicate_main.repm_counters,
1762 dpo_id->dpoi_index, &c);
1764 * If it has seen at least one packet, send it.
1768 /* already in net byte order */
1769 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1770 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1771 ctrp->group_length = pfx->fp_len;
1772 ctrp->packets = clib_host_to_net_u64 (c.packets);
1773 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: wait for queue space, send, then start a new message */
1777 if (mp->count == items_this_message)
1779 mp->count = htonl (items_this_message);
1781 * If the main thread's input queue is stuffed,
1782 * drop the data structure lock (which the main thread
1783 * may want), and take a pause.
1787 while (svm_queue_is_full (q))
1789 svm_queue_unlock (q);
1790 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1791 STATS_RELEASE_DELAY_NS);
1794 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1795 svm_queue_unlock (q);
1797 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1798 mp = vl_msg_api_alloc_as_if_client
1800 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1801 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1803 mp->vrf_id = ntohl (mfib->mft_table_id);
1804 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1809 /* Flush any data from this mfib */
1812 mp->count = htonl (mp->count);
1813 vl_msg_api_send_shmem (q, (u8 *) & mp);
1818 /* If e.g. the last FIB had no reportable routes, free the buffer */
1820 vl_msg_api_free (mp);
/**
 * @brief IPv6 twin of do_ip4_mfib_counters: collect mfib prefixes under
 * the lock, re-lookup each after possible suspends, and batch non-zero
 * replicate counters into VNET_IP6_MFIB_COUNTERS messages.
 */
1824 do_ip6_mfib_counters (stats_main_t * sm)
1826 ip6_main_t *im6 = &ip6_main;
1827 api_main_t *am = sm->api_main;
1828 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1829 svm_queue_t *q = shmem_hdr->vl_input_queue;
1832 do_ip46_fibs_t *do_fibs;
1833 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1834 u32 items_this_message;
1835 vl_api_ip6_mfib_counter_t *ctrp = 0;
1836 u32 start_at_mfib_index = 0;
1839 do_fibs = &sm->do_ip46_fibs;
1841 vec_reset_length (do_fibs->mfibs);
1843 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1846 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1848 mfib = do_fibs->mfibs[j];
1849 /* We may have bailed out due to control-plane activity */
1850 while ((mfib - im6->mfibs) < start_at_mfib_index)
1855 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1856 mp = vl_msg_api_alloc_as_if_client
1858 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1859 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1861 mp->vrf_id = ntohl (mfib->mft_table_id);
1862 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1866 /* happens if the last MFIB was empty... */
1867 ASSERT (mp->count == 0);
1868 mp->vrf_id = ntohl (mfib->mft_table_id);
1871 vec_reset_length (do_fibs->mroutes);
1874 * walk the table with table updates blocked
1876 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1878 mfib_table_walk (mfib->mft_index,
1879 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1882 vec_foreach (pfx, do_fibs->mroutes)
1884 const dpo_id_t *dpo_id;
1885 fib_node_index_t mfei;
1890 * re-lookup the entry, since we suspend during the collection
1892 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* entry may have been deleted while we were suspended */
1894 if (FIB_NODE_INDEX_INVALID == mfei)
1897 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1898 index = (u32) dpo_id->dpoi_index;
1900 vlib_get_combined_counter (&replicate_main.repm_counters,
1901 dpo_id->dpoi_index, &c);
1903 * If it has seen at least one packet, send it.
1907 /* already in net byte order */
1908 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1909 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1910 ctrp->group_length = pfx->fp_len;
1911 ctrp->packets = clib_host_to_net_u64 (c.packets);
1912 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: wait for queue space, send, then start a new message */
1916 if (mp->count == items_this_message)
1918 mp->count = htonl (items_this_message);
1920 * If the main thread's input queue is stuffed,
1921 * drop the data structure lock (which the main thread
1922 * may want), and take a pause.
1926 while (svm_queue_is_full (q))
1928 svm_queue_unlock (q);
1929 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1930 STATS_RELEASE_DELAY_NS);
1933 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1934 svm_queue_unlock (q);
1936 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1937 mp = vl_msg_api_alloc_as_if_client
1939 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1940 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1942 mp->vrf_id = ntohl (mfib->mft_table_id);
1943 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1948 /* Flush any data from this mfib */
1951 mp->count = htonl (mp->count);
1952 vl_msg_api_send_shmem (q, (u8 *) & mp);
1957 /* If e.g. the last FIB had no reportable routes, free the buffer */
1959 vl_msg_api_free (mp);
/* argument block for the ip6 bihash walk below */
1965 ip6_route_t **routep;
1967 } add_routes_in_fib_arg_t;
/**
 * @brief clib_bihash foreach callback: append every ip6 route belonging
 * to the requested fib_index to *routep.  If the control plane wants the
 * lock, longjmp back to the collector so it can suspend and restart.
 */
1970 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1972 add_routes_in_fib_arg_t *ap = arg;
1973 stats_main_t *sm = ap->sm;
/* non-local exit: unwinds straight back to clib_setjmp in the caller */
1975 if (sm->data_structure_lock->release_hint)
1976 clib_longjmp (&sm->jmp_buf, 1);
/* upper 32 bits of key[2] hold the fib index for this entry */
1978 if (kvp->key[2] >> 32 == ap->fib_index)
1980 ip6_address_t *addr;
1982 addr = (ip6_address_t *) kvp;
1983 vec_add2 (*ap->routep, r, 1);
1984 r->address = addr[0];
1985 r->address_length = kvp->key[2] & 0xFF;
1986 r->index = kvp->value;
/**
 * @brief Scrape IPv6 FIB per-route counters and ship them to API clients.
 * Uses a setjmp/longjmp pair so the bihash walk (add_routes_in_fib) can
 * abort when the control plane needs the lock; the walk is then restarted
 * at the current table after a delay.
 */
1991 do_ip6_fib_counters (stats_main_t * sm)
1993 ip6_main_t *im6 = &ip6_main;
1994 api_main_t *am = sm->api_main;
1995 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1996 svm_queue_t *q = shmem_hdr->vl_input_queue;
1999 do_ip46_fibs_t *do_fibs;
2000 vl_api_vnet_ip6_fib_counters_t *mp = 0;
2001 u32 items_this_message;
2002 vl_api_ip6_fib_counter_t *ctrp = 0;
2003 u32 start_at_fib_index = 0;
2004 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
2005 add_routes_in_fib_arg_t _a, *a = &_a;
2008 do_fibs = &sm->do_ip46_fibs;
2010 vec_reset_length (do_fibs->fibs);
2012 pool_foreach (fib, im6->fibs,
2013 ({vec_add1(do_fibs->fibs,fib);}));
2017 for (i = 0; i < vec_len (do_fibs->fibs); i++)
2019 fib = do_fibs->fibs[i];
2020 /* We may have bailed out due to control-plane activity */
2021 while ((fib - im6->fibs) < start_at_fib_index)
2026 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2027 mp = vl_msg_api_alloc_as_if_client
2029 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2030 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2032 mp->vrf_id = ntohl (fib->ft_table_id);
2033 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2036 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2038 vec_reset_length (do_fibs->ip6routes);
2039 vec_reset_length (do_fibs->results);
2041 a->fib_index = fib - im6->fibs;
2042 a->routep = &do_fibs->ip6routes;
/* setjmp returns non-zero when the walk longjmp'd out: delay and retry */
2045 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
2047 start_at_fib_index = fib - im6->fibs;
2048 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
2053 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2054 STATS_RELEASE_DELAY_NS);
2056 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2060 vec_foreach (r, do_fibs->ip6routes)
2064 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2067 * If it has actually
2068 * seen at least one packet, send it.
2072 /* already in net byte order */
2073 ctrp->address[0] = r->address.as_u64[0];
2074 ctrp->address[1] = r->address.as_u64[1];
2075 ctrp->address_length = (u8) r->address_length;
2076 ctrp->packets = clib_host_to_net_u64 (c.packets);
2077 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: send it and start a fresh message */
2081 if (mp->count == items_this_message)
2083 mp->count = htonl (items_this_message);
2085 * If the main thread's input queue is stuffed,
2086 * drop the data structure lock (which the main thread
2087 * may want), and take a pause.
2090 if (svm_queue_is_full (q))
2093 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2094 svm_queue_unlock (q);
2096 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2097 STATS_RELEASE_DELAY_NS);
2100 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2101 svm_queue_unlock (q);
2103 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2104 mp = vl_msg_api_alloc_as_if_client
2106 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2107 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2109 mp->vrf_id = ntohl (fib->ft_table_id);
2110 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2114 if (sm->data_structure_lock->release_hint)
2116 start_at_fib_index = fib - im6->fibs;
2118 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2120 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2123 } /* vec_foreach (routes) */
2127 /* Flush any data from this fib */
2130 mp->count = htonl (mp->count);
2131 vl_msg_api_send_shmem (q, (u8 *) & mp);
2136 /* If e.g. the last FIB had no reportable routes, free the buffer */
2138 vl_msg_api_free (mp);
/* per-object UDP-encap counter sample */
2141 typedef struct udp_encap_stat_t_
/* walk context: vector of samples collected before shipping */
2147 typedef struct udp_encap_stats_walk_t_
2149 udp_encap_stat_t *stats;
2150 } udp_encap_stats_walk_t;
/**
 * @brief udp_encap_walk callback: sample one UDP-encap object's counters
 * (packets, bytes) into the walk context.
 */
2153 udp_encap_stats_walk_cb (index_t uei, void *arg)
2155 udp_encap_stats_walk_t *ctx = arg;
2156 udp_encap_stat_t *stat;
2159 ue = udp_encap_get (uei);
2160 vec_add2 (ctx->stats, stat, 1);
2163 udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
/**
 * @brief Ship collected UDP-encap counters to API clients in batches of
 * at most UDP_ENCAP_COUNTER_BATCH_SIZE, consuming from the back of the
 * context vector; same batching/suspend pattern as ip4/ip6_nbr_ship.
 */
2169 udp_encap_ship (udp_encap_stats_walk_t * ctx)
2171 vl_api_vnet_udp_encap_counters_t *mp;
2172 vl_shmem_hdr_t *shmem_hdr;
2180 shmem_hdr = am->shmem_hdr;
2181 q = shmem_hdr->vl_input_queue;
2184 * If the walk context has counters, which may be left over from the last
2185 * suspend, then we continue from there.
2187 while (0 != vec_len (ctx->stats))
2189 u32 n_items = MIN (vec_len (ctx->stats),
2190 UDP_ENCAP_COUNTER_BATCH_SIZE);
2193 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2195 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2198 (vl_api_udp_encap_counter_t)));
2199 mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2200 mp->count = ntohl (n_items);
2203 * copy the counters from the back of the context, then we can easily
2204 * 'erase' them by resetting the vector length.
2205 * The order we push the stats to the caller is not important.
2208 &ctx->stats[vec_len (ctx->stats) - n_items],
2209 n_items * sizeof (*ctx->stats));
2211 _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
/* pause after sending if the main thread's queue is full */
2217 pause = svm_queue_is_full (q);
2219 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2220 svm_queue_unlock (q);
2224 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2225 STATS_RELEASE_DELAY_NS);
/**
 * @brief Collect all UDP-encap counters under the data-structure lock,
 * then ship them to registered clients.
 */
2230 do_udp_encap_counters (stats_main_t * sm)
2232 udp_encap_stat_t *stat;
2234 udp_encap_stats_walk_t ctx = {
2238 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2239 udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
2242 udp_encap_ship (&ctx);
/**
 * @brief Set the stats scraper poll interval in seconds.
 * @return 0 on success, VNET_API_ERROR_INVALID_ARGUMENT if the delay is 0.
 */
2246 stats_set_poller_delay (u32 poller_delay_sec)
2248 stats_main_t *sm = &stats_main;
/* a zero delay would spin the poller; reject it */
2249 if (!poller_delay_sec)
2251 return VNET_API_ERROR_INVALID_ARGUMENT;
2255 sm->stats_poll_interval_in_seconds = poller_delay_sec;
2261 * Accept connection on the socket and exchange the fd for the shared
/* One-shot per client: send the stats-segment fd over the unix socket
 * (SCM_RIGHTS-style fd passing), then close the accepted connection. */
2264 static clib_error_t *
2265 stats_socket_accept_ready (clib_file_t * uf)
2267 stats_main_t *sm = &stats_main;
2268 ssvm_private_t *ssvmp = &sm->stat_segment;
2270 clib_socket_t client = { 0 };
2272 err = clib_socket_accept (sm->socket, &client);
2275 clib_error_report (err);
2279 /* Send the fd across and close */
2280 err = clib_socket_sendmsg (&client, 0, 0, &ssvmp->fd, 1);
2282 clib_error_report (err);
2283 clib_socket_close (&client);
/**
 * @brief Create the stats-segment listener socket and register it with
 * the file poller so stats_socket_accept_ready runs on each connection.
 * NOTE(review): the clib_socket_t is heap-allocated and never freed here;
 * presumably it lives for the process lifetime.
 */
2289 stats_segment_socket_init (void)
2291 stats_main_t *sm = &stats_main;
2292 clib_error_t *error;
2293 clib_socket_t *s = clib_mem_alloc (sizeof (clib_socket_t));
2295 s->config = (char *) sm->socket_name;
2296 s->flags = CLIB_SOCKET_F_IS_SERVER | CLIB_SOCKET_F_SEQPACKET |
2297 CLIB_SOCKET_F_ALLOW_GROUP_WRITE | CLIB_SOCKET_F_PASSCRED;
2298 if ((error = clib_socket_init (s)))
2300 clib_error_report (error);
2304 clib_file_t template = { 0 };
2305 clib_file_main_t *fm = &file_main;
2306 template.read_function = stats_socket_accept_ready;
2307 template.file_descriptor = s->fd;
2308 template.description =
2309 format (0, "stats segment listener %s", STAT_SEGMENT_SOCKET_FILE);
2310 clib_file_add (fm, &template);
/**
 * @brief Parse the startup "stats { ... }" configuration stanza:
 * socket-name <path> | default (use STAT_SEGMENT_SOCKET_FILE) |
 * interval <seconds> (poller delay).  Initializes the stats-segment
 * socket if a socket name was configured.
 */
2315 static clib_error_t *
2316 stats_config (vlib_main_t * vm, unformat_input_t * input)
2318 stats_main_t *sm = &stats_main;
2321 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2323 if (unformat (input, "socket-name %s", &sm->socket_name))
2325 else if (unformat (input, "default"))
2326 sm->socket_name = format (0, "%s", STAT_SEGMENT_SOCKET_FILE)
2327 else if (unformat (input, "interval %u", &sec))
2329 int rv = stats_set_poller_delay (sec);
2332 return clib_error_return (0,
2333 "`stats_set_poller_delay' API call failed, rv=%d:%U",
2334 (int) rv, format_vnet_api_errno, rv);
2339 return clib_error_return (0, "unknown input '%U'",
2340 format_unformat_error, input);
2344 if (sm->socket_name)
2345 stats_segment_socket_init ();
2350 /* stats { ... } configuration. */
2353 * @cfgcmd{interval, <seconds>}
2354 * Configure stats poller delay to be @c seconds.
2357 VLIB_CONFIG_FUNCTION (stats_config, "stats");
/**
 * @brief Handler for stats_get_poller_delay: reply to the requesting
 * client with the current poll interval (network byte order).
 *
 * Fix: the reply message id was VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY
 * (copy-paste from another handler), so clients dispatched the reply to the
 * wrong handler.  It must be VL_API_STATS_GET_POLLER_DELAY_REPLY to match
 * the vl_api_stats_get_poller_delay_reply_t payload being sent.
 */
2360 vl_api_stats_get_poller_delay_t_handler
2361 (vl_api_stats_get_poller_delay_t * mp)
2363 stats_main_t *sm = &stats_main;
2364 vl_api_registration_t *reg;
2365 reg = vl_api_client_index_to_registration (mp->client_index);
2368 vl_api_stats_get_poller_delay_reply_t *rmp;
2370 rmp = vl_msg_api_alloc (sizeof (*rmp));
2371 rmp->_vl_msg_id = ntohs (VL_API_STATS_GET_POLLER_DELAY_REPLY);
2372 rmp->context = mp->context;
2374 rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2376 vl_api_send_msg (reg, (u8 *) rmp);
/**
 * @brief Main loop of the stats scraper pthread.  Blocks all signals,
 * names the thread, switches to its own heap, then every poll interval
 * updates the stats segment and runs each per-counter collector that has
 * at least one registered client.
 */
2381 stats_thread_fn (void *arg)
2383 stats_main_t *sm = &stats_main;
2384 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2385 vlib_thread_main_t *tm = vlib_get_thread_main ();
2387 /* stats thread wants no signals. */
2391 pthread_sigmask (SIG_SETMASK, &s, 0);
2394 if (vec_len (tm->thread_prefix))
2395 vlib_set_thread_name ((char *)
2396 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2398 clib_mem_set_heap (w->thread_mheap);
/* sleep for one poll interval between scrapes */
2402 ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
2405 /* Always update stats segment data */
2406 do_stat_segment_updates (sm);
/* per-client binary-API scraping only runs when a poller is enabled */
2408 if (!(sm->enable_poller))
2412 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2413 do_combined_per_interface_counters (sm);
2416 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2417 do_simple_per_interface_counters (sm);
2419 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2420 do_ip4_fib_counters (sm);
2422 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2423 do_ip6_fib_counters (sm);
2425 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2426 do_ip4_mfib_counters (sm);
2428 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2429 do_ip6_mfib_counters (sm);
2431 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2432 do_ip4_nbr_counters (sm);
2434 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2435 do_ip6_nbr_counters (sm);
2437 if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
2438 do_udp_encap_counters (sm);
/**
 * @brief Fan a VNET_INTERFACE_SIMPLE_COUNTERS message out to every
 * registered client.  The incoming mp is reused for the final send; for
 * intermediate clients a copy is made first so mp survives.  Clients with
 * a dead registration are unregistered.
 */
2443 vl_api_vnet_interface_simple_counters_t_handler
2444 (vl_api_vnet_interface_simple_counters_t * mp)
2446 vpe_client_registration_t *clients, client;
2447 stats_main_t *sm = &stats_main;
2448 vl_api_registration_t *reg, *reg_prev = NULL;
2449 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
/* total size = fixed header + count u64 counter values */
2453 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2456 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2457 ~0 /*flag for all */ );
2459 for (i = 0; i < vec_len (clients); i++)
2461 client = clients[i];
2462 reg = vl_api_client_index_to_registration (client.client_index);
/* send to the previous client using a fresh copy; keep mp for the last */
2465 if (reg_prev && vl_api_can_send_msg (reg_prev))
2467 mp_copy = vl_msg_api_alloc_as_if_client (mp_size)
2468 clib_memcpy (mp_copy, mp, mp_size);
2469 vl_api_send_msg (reg_prev, (u8 *) mp);
/* registration vanished: drop this client from the stat */
2477 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2478 client.client_index);
2485 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2488 if (reg_prev && vl_api_can_send_msg (reg_prev))
2490 vl_api_send_msg (reg_prev, (u8 *) mp);
2494 vl_msg_api_free (mp);
/**
 * @brief Fan a VNET_IP4_FIB_COUNTERS message out to every registered
 * client (copy-for-all-but-last pattern); dead registrations are cleared
 * and the poller enable flag recomputed.
 */
2499 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2501 stats_main_t *sm = &stats_main;
2502 vl_api_registration_t *reg, *reg_prev = NULL;
2503 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2505 vpe_client_registration_t *clients, client;
2508 mp_size = sizeof (*mp_copy) +
2509 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2512 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2514 for (i = 0; i < vec_len (clients); i++)
2516 client = clients[i];
2517 reg = vl_api_client_index_to_registration (client.client_index);
2520 if (reg_prev && vl_api_can_send_msg (reg_prev))
2522 mp_copy = vl_msg_api_alloc_as_if_client (mp_size)
2523 clib_memcpy (mp_copy, mp, mp_size);
2524 vl_api_send_msg (reg_prev, (u8 *) mp);
/* dead client: unregister and refresh the poller-enable flag */
2531 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2532 ~0, client.client_index);
2538 if (reg_prev && vl_api_can_send_msg (reg_prev))
2540 vl_api_send_msg (reg_prev, (u8 *) mp);
2544 vl_msg_api_free (mp);
/**
 * @brief Fan a VNET_IP4_NBR_COUNTERS message out to every registered
 * client; same copy-for-all-but-last / dead-client-cleanup pattern as the
 * ip4 fib counters handler.
 */
2549 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2551 stats_main_t *sm = &stats_main;
2552 vl_api_registration_t *reg, *reg_prev = NULL;
2553 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2555 vpe_client_registration_t *clients, client;
2558 mp_size = sizeof (*mp_copy) +
2559 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2562 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2564 for (i = 0; i < vec_len (clients); i++)
2566 client = clients[i];
2567 reg = vl_api_client_index_to_registration (client.client_index);
2570 if (reg_prev && vl_api_can_send_msg (reg_prev))
2572 mp_copy = vl_msg_api_alloc_as_if_client (mp_size)
2573 clib_memcpy (mp_copy, mp, mp_size);
2574 vl_api_send_msg (reg_prev, (u8 *) mp);
2581 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2582 ~0, client.client_index);
2589 if (reg_prev && vl_api_can_send_msg (reg_prev))
2591 vl_api_send_msg (reg_prev, (u8 *) mp);
2595 vl_msg_api_free (mp);
/**
 * @brief Fan a VNET_IP6_FIB_COUNTERS message out to every registered
 * client; same pattern as the ip4 fib counters handler.
 */
2600 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2602 stats_main_t *sm = &stats_main;
2603 vl_api_registration_t *reg, *reg_prev = NULL;
2604 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2606 vpe_client_registration_t *clients, client;
2609 mp_size = sizeof (*mp_copy) +
2610 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2613 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2615 for (i = 0; i < vec_len (clients); i++)
2617 client = clients[i];
2618 reg = vl_api_client_index_to_registration (client.client_index);
2621 if (reg_prev && vl_api_can_send_msg (reg_prev))
2623 mp_copy = vl_msg_api_alloc_as_if_client (mp_size)
2624 clib_memcpy (mp_copy, mp, mp_size);
2625 vl_api_send_msg (reg_prev, (u8 *) mp);
2632 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2633 ~0, client.client_index);
2640 if (reg_prev && vl_api_can_send_msg (reg_prev))
2642 vl_api_send_msg (reg_prev, (u8 *) mp);
2646 vl_msg_api_free (mp);
/**
 * @brief Fan a VNET_IP6_NBR_COUNTERS message out to every registered
 * client; same pattern as the ip4 nbr counters handler.
 */
2651 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2653 stats_main_t *sm = &stats_main;
2654 vl_api_registration_t *reg, *reg_prev = NULL;
2655 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2657 vpe_client_registration_t *clients, client;
2660 mp_size = sizeof (*mp_copy) +
2661 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2664 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2666 for (i = 0; i < vec_len (clients); i++)
2668 client = clients[i];
2669 reg = vl_api_client_index_to_registration (client.client_index);
2672 if (reg_prev && vl_api_can_send_msg (reg_prev))
2674 mp_copy = vl_msg_api_alloc_as_if_client (mp_size)
2675 clib_memcpy (mp_copy, mp, mp_size);
2676 vl_api_send_msg (reg_prev, (u8 *) mp);
2683 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2684 ~0, client.client_index);
2691 if (reg_prev && vl_api_can_send_msg (reg_prev))
2693 vl_api_send_msg (reg_prev, (u8 *) mp);
2697 vl_msg_api_free (mp);
/**
 * @brief Register/unregister the calling client for UDP-encap counter
 * updates, then reply with WANT_UDP_ENCAP_STATS_REPLY.
 */
2702 vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
2704 stats_main_t *sm = &stats_main;
2705 vpe_client_registration_t rp;
2706 vl_api_want_udp_encap_stats_reply_t *rmp;
2709 vl_api_registration_t *reg;
2712 fib = ~0;			//Using same mechanism as _per_interface_
2713 rp.client_index = mp->client_index;
2714 rp.client_pid = mp->pid;
2716 handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2719 reg = vl_api_client_index_to_registration (mp->client_index);
/* client went away before we could reply: roll back the registration */
2723 sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2724 fib, mp->client_index);
2728 rmp = vl_msg_api_alloc (sizeof (*rmp));
2729 rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2730 rmp->context = mp->context;
2731 rmp->retval = retval;
2733 vl_api_send_msg (reg, (u8 *) rmp);
/**
 * @brief Legacy want_stats: (un)register the client for ALL classic stats
 * at once — per-interface simple/combined, ip4/ip6 fib and nbr counters —
 * then reply with WANT_STATS_REPLY.
 */
2737 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2739 stats_main_t *sm = &stats_main;
2740 vpe_client_registration_t rp;
2741 vl_api_want_stats_reply_t *rmp;
2745 vl_api_registration_t *reg;
2747 item = ~0;			//"ALL THE THINGS IN THE THINGS
2748 rp.client_index = mp->client_index;
2749 rp.client_pid = mp->pid;
2751 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2752 item, mp->enable_disable);
2754 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2755 item, mp->enable_disable);
2757 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2758 item, mp->enable_disable);
2760 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2761 item, mp->enable_disable);
2763 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2764 item, mp->enable_disable);
2766 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2767 item, mp->enable_disable);
2770 reg = vl_api_client_index_to_registration (mp->client_index);
2774 rmp = vl_msg_api_alloc (sizeof (*rmp));
2775 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2776 rmp->context = mp->context;
2777 rmp->retval = retval;
2779 vl_api_send_msg (reg, (u8 *) rmp);
/**
 * @brief (Un)register the client for all-interface simple counters
 * (swif == ~0 wildcard), then reply with
 * WANT_INTERFACE_SIMPLE_STATS_REPLY.
 */
2783 vl_api_want_interface_simple_stats_t_handler
2784 (vl_api_want_interface_simple_stats_t * mp)
2786 stats_main_t *sm = &stats_main;
2787 vpe_client_registration_t rp;
2788 vl_api_want_interface_simple_stats_reply_t *rmp;
2792 vl_api_registration_t *reg;
2794 swif = ~0;			//Using same mechanism as _per_interface_
2795 rp.client_index = mp->client_index;
2796 rp.client_pid = mp->pid;
2798 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2799 mp->enable_disable);
2802 reg = vl_api_client_index_to_registration (mp->client_index);
/* client went away before we could reply: roll back the registration */
2807 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2812 rmp = vl_msg_api_alloc (sizeof (*rmp));
2813 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2814 rmp->context = mp->context;
2815 rmp->retval = retval;
2817 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Handler for WANT_IP4_FIB_STATS: (de)registers the caller for IP4 FIB
 * counter updates across all FIBs, then sends the reply.
 * NOTE(review): elided view — declarations of `fib`/`retval` and the
 * `if (!reg)` guard surrounding the clear/enable_poller update are not
 * visible (numbering gaps mark the missing lines).
 */
2822 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2824 stats_main_t *sm = &stats_main;
2825 vpe_client_registration_t rp;
2826 vl_api_want_ip4_fib_stats_reply_t *rmp;
2829 vl_api_registration_t *reg;
/* fib == ~0 means "all FIB tables" */
2832 fib = ~0; //Using same mechanism as _per_interface_
2833 rp.client_index = mp->client_index;
2834 rp.client_pid = mp->pid;
2836 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2837 mp->enable_disable);
2840 reg = vl_api_client_index_to_registration (mp->client_index);
/* presumably inside `if (!reg)`: remove the registration for a vanished
 * client and recompute whether the poller thread is still needed */
2844 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2845 fib, mp->client_index);
2849 rmp = vl_msg_api_alloc (sizeof (*rmp));
2850 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2851 rmp->context = mp->context;
2852 rmp->retval = retval;
2854 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Handler for WANT_IP4_MFIB_STATS: (de)registers the caller for IP4
 * multicast-FIB counter updates across all mfib tables, then replies.
 * NOTE(review): elided view — `mfib`/`retval` declarations and the
 * `if (!reg)` guard are not visible here.
 */
2858 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2860 stats_main_t *sm = &stats_main;
2861 vpe_client_registration_t rp;
2862 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2865 vl_api_registration_t *reg;
/* mfib == ~0 means "all mfib tables" */
2868 mfib = ~0; //Using same mechanism as _per_interface_
2869 rp.client_index = mp->client_index;
2870 rp.client_pid = mp->pid;
2872 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2873 mp->enable_disable);
2876 reg = vl_api_client_index_to_registration (mp->client_index);
/* presumably inside `if (!reg)`: drop registration for a vanished
 * client and recompute the poller-enable state */
2879 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2880 mfib, mp->client_index);
2884 rmp = vl_msg_api_alloc (sizeof (*rmp));
2885 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2886 rmp->context = mp->context;
2887 rmp->retval = retval;
2889 vl_api_send_msg (reg, (u8 *) rmp);
2893 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2895 stats_main_t *sm = &stats_main;
2896 vpe_client_registration_t rp;
2897 vl_api_want_ip4_fib_stats_reply_t *rmp;
2900 vl_api_registration_t *reg;
2903 fib = ~0; //Using same mechanism as _per_interface_
2904 rp.client_index = mp->client_index;
2905 rp.client_pid = mp->pid;
2907 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2908 mp->enable_disable);
2911 reg = vl_api_client_index_to_registration (mp->client_index);
2914 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2915 fib, mp->client_index);
2919 rmp = vl_msg_api_alloc (sizeof (*rmp));
2920 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2921 rmp->context = mp->context;
2922 rmp->retval = retval;
2924 vl_api_send_msg (reg, (u8 *) rmp);
2928 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2930 stats_main_t *sm = &stats_main;
2931 vpe_client_registration_t rp;
2932 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2935 vl_api_registration_t *reg;
2938 mfib = ~0; //Using same mechanism as _per_interface_
2939 rp.client_index = mp->client_index;
2940 rp.client_pid = mp->pid;
2942 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2943 mp->enable_disable);
2946 reg = vl_api_client_index_to_registration (mp->client_index);
2949 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2950 mfib, mp->client_index);
2954 rmp = vl_msg_api_alloc (sizeof (*rmp));
2955 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2956 rmp->context = mp->context;
2957 rmp->retval = retval;
2959 vl_api_send_msg (reg, (u8 *) rmp);
2962 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/* Deliberate no-op stub: neighbour stats registration is disabled until
 * fixed (see FIXME above). Body braces are elided from this view. */
2964 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
/* Same deliberate no-op stub for the IP6 neighbour variant. */
2969 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/*
 * Handler for VNET_GET_SUMMARY_STATS: totals rx/tx packet and byte
 * counters over every software interface and returns them, plus the
 * last vector length per node, in a single reply message.
 * NOTE(review): elided view — braces, `i`/`which` declarations, the
 * `if (!reg) return;` guard, and `rmp->retval` assignment are among the
 * lines the numbering gaps show as missing.
 */
2974 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2976 stats_main_t *sm = &stats_main;
2977 vnet_interface_main_t *im = sm->interface_main;
2978 vl_api_vnet_get_summary_stats_reply_t *rmp;
2979 vlib_combined_counter_main_t *cm;
/* Accumulators indexed by VLIB_RX / VLIB_TX */
2982 u64 total_pkts[VLIB_N_RX_TX];
2983 u64 total_bytes[VLIB_N_RX_TX];
2984 vl_api_registration_t *reg;
2986 reg = vl_api_client_index_to_registration (mp->client_index);
2990 rmp = vl_msg_api_alloc (sizeof (*rmp));
2991 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2992 rmp->context = mp->context;
2995 memset (total_pkts, 0, sizeof (total_pkts));
2996 memset (total_bytes, 0, sizeof (total_bytes));
/* Hold the counter lock while reading so totals are self-consistent */
2998 vnet_interface_counter_lock (im);
3000 vec_foreach (cm, im->combined_sw_if_counters)
/* `which` is the vector index of this counter main: VLIB_RX or VLIB_TX */
3002 which = cm - im->combined_sw_if_counters;
3004 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
3006 vlib_get_combined_counter (cm, i, &v);
3007 total_pkts[which] += v.packets;
3008 total_bytes[which] += v.bytes;
3011 vnet_interface_counter_unlock (im);
/* Totals are sent in network byte order */
3013 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
3014 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
3015 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
3016 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
/* presumably assigned to rmp->vector_rate on the elided preceding line */
3018 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
3020 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Callback invoked when an API client disconnects. The per-client
 * registration cleanup is commented out (dead code below) — the
 * visible body performs no work.
 */
3024 stats_memclnt_delete_callback (u32 client_index)
3026 vpe_client_stats_registration_t *rp;
3027 stats_main_t *sm = &stats_main;
/* Former hash/pool cleanup, intentionally disabled: */
3031 /* p = hash_get (sm->stats_registration_hash, client_index); */
3034 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
3035 /* pool_put (sm->stats_registrations, rp); */
3036 /* hash_unset (sm->stats_registration_hash, client_index); */
/*
 * Map the endian-conversion and print handlers for the streamed counter
 * messages to vl_noop_handler — presumably because these messages carry
 * variable-length counter payloads that are serialized manually by the
 * stats thread rather than by the generated per-field code (TODO
 * confirm against the message definitions).
 */
3042 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
3043 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
3044 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
3045 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
3046 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
3047 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
3048 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
3049 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
3050 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
3051 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
3052 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
3053 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/*
 * One-time init for the stats module: caches main-struct pointers,
 * allocates the data-structure lock, registers the stats API message
 * handlers, marks streamed counter messages as non-bounced, and sizes
 * the per-category registration pools/hashes.
 * NOTE(review): elided view — the `#define _(N,n)` open, the
 * `foreach_stats_msg; #undef _` expansion lines, and the trailing
 * `return 0;` are among the lines missing from this chunk.
 */
3055 static clib_error_t *
3056 stats_init (vlib_main_t * vm)
3058 stats_main_t *sm = &stats_main;
3059 api_main_t *am = &api_main;
3060 void *vlib_worker_thread_bootstrap_fn (void *arg);
3063 sm->vnet_main = vnet_get_main ();
3064 sm->interface_main = &vnet_get_main ()->interface_main;
/* Default polling cadence for the stats scraper thread */
3066 sm->stats_poll_interval_in_seconds = 10;
/* Cache-line aligned lock protecting the registration structures */
3067 sm->data_structure_lock =
3068 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
3069 CLIB_CACHE_LINE_BYTES);
3070 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Per-message registration, expanded for each entry of foreach_stats_msg */
3073 vl_msg_api_set_handlers(VL_API_##N, #n, \
3074 vl_api_##n##_t_handler, \
3076 vl_api_##n##_t_endian, \
3077 vl_api_##n##_t_print, \
3078 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
3082 /* tell the msg infra not to free these messages... */
3083 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
3084 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
3085 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
3086 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
3087 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
3088 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
3091 * Set up the (msg_name, crc, message-id) table
3093 setup_message_id_table (am);
/* One registration pool + hash slot per stats category index */
3095 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
3096 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
3097 #define stats_reg(n) \
3098 sm->stats_registrations[IDX_##n] = 0; \
3099 sm->stats_registration_hash[IDX_##n] = 0;
3100 #include <vpp/stats/stats.reg>
3106 VLIB_INIT_FUNCTION (stats_init);
3109 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
3111 .function = stats_thread_fn,
3114 .no_data_structure_clone = 1,
3120 * fd.io coding-style-patch-verification: ON
3123 * eval: (c-set-style "gnu")