2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
24 stats_main_t stats_main;
26 #include <vnet/ip/ip.h>
28 #include <vpp/api/vpe_msg_enum.h>
31 #define f64_print(a,b)
33 #define vl_typedefs /* define message structures */
34 #include <vpp/api/vpe_all_api_h.h>
37 #define vl_endianfun /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
41 /* instantiate all the print functions we know about */
42 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
44 #include <vpp/api/vpe_all_api_h.h>
/* X-macro list of all stats-related binary API messages handled by this
 * module.  Each _(UPPER_NAME, lower_name) pair maps a VL_API_* message ID
 * to its vl_api_<lower_name>_t_handler; the list is expanded elsewhere to
 * register handlers.  Covers both "want_*" subscription requests and the
 * counter-bearing messages themselves. */
47 #define foreach_stats_msg \
48 _(WANT_STATS, want_stats) \
49 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
50 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
51 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
52 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
53 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
54 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
55 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
56 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
57 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
58 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
59 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
60 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
61 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
62 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
63 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
64 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
65 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
/* Pull in the generated (message name, CRC) list for the stats API. */
68 #define vl_msg_name_crc_list
69 #include <vpp/stats/stats.api.h>
70 #undef vl_msg_name_crc_list
/* Register every stats API message's name+CRC with the API message table
 * so clients can look up message IDs by "name_crc" string. */
73 setup_message_id_table (api_main_t * am)
76 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
77 foreach_vl_msg_name_crc_stats;
81 /* These constants ensure msg sizes <= 1024, aka ring allocation */
82 #define SIMPLE_COUNTER_BATCH_SIZE 126
83 #define COMBINED_COUNTER_BATCH_SIZE 63
84 #define IP4_FIB_COUNTER_BATCH_SIZE 48
85 #define IP6_FIB_COUNTER_BATCH_SIZE 30
86 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
87 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
/* Back-off delay (nanoseconds) used when a client queue is full: 5 ms. */
90 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* format() callback: render a combined (packets+bytes) interface-counter
 * API message as human-readable "ifname.<dir>.packets N" lines.
 * Used for debug/CLI display of a vl_api_vnet_interface_combined_counters_t. */
94 format_vnet_interface_combined_counters (u8 * s, va_list * args)
96 stats_main_t *sm = &stats_main;
97 vl_api_vnet_interface_combined_counters_t *mp =
98 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
101 u32 count, sw_if_index;
103 count = ntohl (mp->count);
104 sw_if_index = ntohl (mp->first_sw_if_index);
/* Counter array follows the fixed header; entries may be unaligned. */
108 vp = (vlib_counter_t *) mp->data;
110 switch (mp->vnet_counter_type)
112 case VNET_INTERFACE_COUNTER_RX:
115 case VNET_INTERFACE_COUNTER_TX:
/* Unknown counter type: label it so the output is still parseable. */
119 counter_name = "bogus";
122 for (i = 0; i < count; i++)
/* Values are big-endian on the wire; byte-swap before printing. */
124 packets = clib_mem_unaligned (&vp->packets, u64);
125 packets = clib_net_to_host_u64 (packets);
126 bytes = clib_mem_unaligned (&vp->bytes, u64);
127 bytes = clib_net_to_host_u64 (bytes);
129 s = format (s, "%U.%s.packets %lld\n",
130 format_vnet_sw_if_index_name,
131 sm->vnet_main, sw_if_index, counter_name, packets);
132 s = format (s, "%U.%s.bytes %lld\n",
133 format_vnet_sw_if_index_name,
134 sm->vnet_main, sw_if_index, counter_name, bytes);
/* format() callback: render a simple (single-u64) interface-counter API
 * message as "ifname.<counter> N" lines, one per interface starting at
 * first_sw_if_index. */
141 format_vnet_interface_simple_counters (u8 * s, va_list * args)
143 stats_main_t *sm = &stats_main;
144 vl_api_vnet_interface_simple_counters_t *mp =
145 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
147 u32 count, sw_if_index;
148 count = ntohl (mp->count);
149 sw_if_index = ntohl (mp->first_sw_if_index);
151 vp = (u64 *) mp->data;
/* Map the wire counter-type enum to a display name. */
154 switch (mp->vnet_counter_type)
156 case VNET_INTERFACE_COUNTER_DROP:
157 counter_name = "drop";
159 case VNET_INTERFACE_COUNTER_PUNT:
160 counter_name = "punt";
162 case VNET_INTERFACE_COUNTER_IP4:
163 counter_name = "ip4";
165 case VNET_INTERFACE_COUNTER_IP6:
166 counter_name = "ip6";
168 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
169 counter_name = "rx-no-buff";
171 case VNET_INTERFACE_COUNTER_RX_MISS:
172 counter_name = "rx-miss";
174 case VNET_INTERFACE_COUNTER_RX_ERROR:
175 counter_name = "rx-error (fifo-full)";
177 case VNET_INTERFACE_COUNTER_TX_ERROR:
178 counter_name = "tx-error (fifo-full)";
/* Default/unknown type label. */
181 counter_name = "bogus";
184 for (i = 0; i < count; i++)
/* Big-endian, possibly unaligned 64-bit value per interface. */
186 v = clib_mem_unaligned (vp, u64);
187 v = clib_net_to_host_u64 (v);
189 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
190 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the stats data-structure spinlock.  A thread that already holds
 * the lock is detected via thread_index and (per the check below) treated
 * as a recursive acquire rather than deadlocking. */
198 dslock (stats_main_t * sm, int release_hint, int tag)
201 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock not allocated yet - nothing to take. */
203 if (PREDICT_FALSE (l == 0))
206 thread_index = vlib_get_thread_index ();
/* Already held by this thread: recursive-acquire path. */
207 if (l->lock && l->thread_index == thread_index)
/* Spin until the atomic test-and-set wins the lock. */
216 while (__sync_lock_test_and_set (&l->lock, 1))
219 l->thread_index = thread_index;
/* Public wrapper: take the lock on the global stats_main. */
224 stats_dslock_with_hint (int hint, int tag)
226 stats_main_t *sm = &stats_main;
227 dslock (sm, hint, tag);
/* Release the stats data-structure lock; the caller must be the owning
 * thread (asserted below). */
231 dsunlock (stats_main_t * sm)
234 data_structure_lock_t *l = sm->data_structure_lock;
236 if (PREDICT_FALSE (l == 0))
239 thread_index = vlib_get_thread_index ();
240 ASSERT (l->lock && l->thread_index == thread_index);
/* Make prior writes visible before the lock word is cleared. */
246 CLIB_MEMORY_BARRIER ();
/* Public wrapper: release the lock on the global stats_main. */
252 stats_dsunlock (int hint, int tag)
254 stats_main_t *sm = &stats_main;
/* Look up a specific client's registration for (stat type reg, item).
 * Returns the client's vpe_client_registration_t when found; the miss
 * paths (elided here) presumably return NULL - verify against full source. */
258 static vpe_client_registration_t *
259 get_client_for_stat (u32 reg, u32 item, u32 client_index)
261 stats_main_t *sm = &stats_main;
262 vpe_client_stats_registration_t *registration;
265 /* Is there anything listening for item in that reg */
266 p = hash_get (sm->stats_registration_hash[reg], item);
271 /* If there is, is our client_index one of them */
272 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
273 p = hash_get (registration->client_hash, client_index);
278 return pool_elt_at_index (registration->clients, p[0]);
/* Register a client for (stat type reg, item).  Creates the per-item
 * registration on first use, then adds the client to that item's client
 * pool/hash if not already present.  Returns 1: at least one client is
 * now listening, so the caller should enable the poller. */
283 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
285 stats_main_t *sm = &stats_main;
286 vpe_client_stats_registration_t *registration;
287 vpe_client_registration_t *cr;
290 /* Is there anything listening for item in that reg */
291 p = hash_get (sm->stats_registration_hash[reg], item);
/* No registration for this item yet: allocate one and index it by item. */
295 pool_get (sm->stats_registrations[reg], registration);
296 registration->item = item;
297 hash_set (sm->stats_registration_hash[reg], item,
298 registration - sm->stats_registrations[reg]);
/* Item already registered: reuse the existing entry. */
302 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
305 p = hash_get (registration->client_hash, client->client_index);
/* Client not yet attached to this item: add to pool and hash. */
309 pool_get (registration->clients, cr);
310 cr->client_index = client->client_index;
311 cr->client_pid = client->client_pid;
312 hash_set (registration->client_hash, cr->client_index,
313 cr - registration->clients);
316 return 1; //At least one client is doing something ... poll
/* Deregister a client from (stat type reg, item).  Frees the per-item
 * registration when its last client leaves, then tallies the remaining
 * registrations across all stat types - presumably to compute the return
 * value that tells the caller whether polling may stop (tail elided). */
320 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
322 stats_main_t *sm = &stats_main;
323 vpe_client_stats_registration_t *registration;
324 vpe_client_registration_t *client;
328 /* Clear the client first */
329 /* Is there anything listening for item in that reg */
330 p = hash_get (sm->stats_registration_hash[reg], item);
335 /* If there is, is our client_index one of them */
336 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
337 p = hash_get (registration->client_hash, client_index);
/* Remove the client from the item's hash and pool. */
342 client = pool_elt_at_index (registration->clients, p[0]);
343 hash_unset (registration->client_hash, client->client_index);
344 pool_put (registration->clients, client);
346 /* Now check if that was the last client for that item */
347 if (0 == pool_elts (registration->clients))
349 hash_unset (sm->stats_registration_hash[reg], item);
350 pool_put (sm->stats_registrations[reg], registration);
355 /* Now check if that was the last item in any of the listened to stats */
356 for (i = 0; i < STATS_REG_N_IDX; i++)
358 elts += pool_elts (sm->stats_registrations[i]);
/* Return a (caller-owned) vector containing a copy of every client
 * registered for (stat type reg, item); empty/NULL when nobody listens.
 * Copies are taken so the caller can iterate without holding pool state. */
363 vpe_client_registration_t *
364 get_clients_for_stat (u32 reg, u32 item)
366 stats_main_t *sm = &stats_main;
367 vpe_client_registration_t *client, *clients = 0;
368 vpe_client_stats_registration_t *registration;
371 /* Is there anything listening for item in that reg */
372 p = hash_get (sm->stats_registration_hash[reg], item);
377 /* If there is, is our client_index one of them */
378 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* Collect a by-value copy of each registered client. */
380 vec_reset_length (clients);
381 pool_foreach (client, registration->clients, (
383 vec_add1 (clients, *client);}
/* Free the per-stat-type index vectors in a client registration array,
 * then free the array itself.  Mirrors init_client_reg(). */
390 clear_client_reg (u32 ** registrations)
392 /* When registrations[x] is a vector of pool indices
393 here is a good place to clean up the pools
395 #define stats_reg(n) vec_free(registrations[IDX_##n]);
396 #include <vpp/stats/stats.reg>
399 vec_free (registrations);
/* Allocate/initialise the per-client registration table: one vector of
 * "interested" indexes per stat type listed in stats.reg.  Returns the
 * (possibly reallocated) registrations array. */
403 init_client_reg (u32 ** registrations)
407 Initialise the stats registrations for each
408 type of stat a client can register for as well as
409 a vector of "interested" indexes.
410 Initially this is a u32 of either sw_if_index or fib_index
411 but eventually this should migrate to a pool_index (u32)
412 with a type specific pool that can include more complex things
413 such as timing and structured events.
415 vec_validate (registrations, STATS_REG_N_IDX);
416 #define stats_reg(n) \
417 vec_reset_length(registrations[IDX_##n]);
418 #include <vpp/stats/stats.reg>
422 When registrations[x] is a vector of pool indices, here
423 is a good place to init the pools.
425 return registrations;
/* Mark every stat type as "wanted" by appending the ~0 wildcard index to
 * each per-type vector.  Legacy behaviour for clients that want all stats. */
429 enable_all_client_reg (u32 ** registrations)
433 Enable all stats known by adding
434 ~0 to the index vector. Eventually this
435 should be deprecated.
437 #define stats_reg(n) \
438 vec_add1(registrations[IDX_##n], ~0);
439 #include <vpp/stats/stats.reg>
441 return registrations;
/* Collect every simple (single-u64) interface counter and ship them to the
 * main thread's API input queue in SIMPLE_COUNTER_BATCH_SIZE batches, so
 * each message fits the <=1024-byte ring allocation. */
445 do_simple_interface_counters (stats_main_t * sm)
447 vl_api_vnet_interface_simple_counters_t *mp = 0;
448 vnet_interface_main_t *im = sm->interface_main;
449 api_main_t *am = sm->api_main;
450 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
451 svm_queue_t *q = shmem_hdr->vl_input_queue;
452 vlib_simple_counter_main_t *cm;
453 u32 items_this_message = 0;
458 * Prevent interface registration from expanding / moving the vectors...
459 * That tends never to happen, so we can hold this lock for a while.
461 vnet_interface_counter_lock (im);
/* One pass per simple-counter type (drop, punt, ip4, ...). */
463 vec_foreach (cm, im->sw_if_counters)
465 n_counts = vlib_simple_counter_n_counters (cm);
466 for (i = 0; i < n_counts; i++)
/* Start a new batch: size it to the smaller of the batch limit and
 * what remains. */
470 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
473 mp = vl_msg_api_alloc_as_if_client
474 (sizeof (*mp) + items_this_message * sizeof (v));
475 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
/* Counter type is the index of cm within the counters vector. */
476 mp->vnet_counter_type = cm - im->sw_if_counters;
477 mp->first_sw_if_index = htonl (i);
479 vp = (u64 *) mp->data;
/* Append this interface's value, network byte order, unaligned-safe. */
481 v = vlib_get_simple_counter (cm, i);
482 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
/* Batch full: finalise the count and enqueue the message. */
485 if (mp->count == items_this_message)
487 mp->count = htonl (items_this_message);
488 /* Send to the main thread... */
489 vl_msg_api_send_shmem (q, (u8 *) & mp);
495 vnet_interface_counter_unlock (im);
/* Common enable/disable path for a client's (stat, item) subscription.
 * On disable: warn if the client wasn't registered, else clear it.
 * On enable: record the client and update sm->enable_poller from
 * set_client_for_stat()'s verdict. */
499 handle_client_registration (vpe_client_registration_t * client, u32 stat,
500 u32 item, int enable_disable)
502 stats_main_t *sm = &stats_main;
503 vpe_client_registration_t *rp, _rp;
505 rp = get_client_for_stat (stat, item, client->client_index);
/* Disable path. */
508 if (enable_disable == 0)
510 if (!rp) // No client to disable
512 clib_warning ("pid %d: already disabled for stats...",
517 clear_client_for_stat (stat, item, client->client_index);
/* Enable path: register (or re-register) the client. */
524 rp->client_index = client->client_index;
525 rp->client_pid = client->client_pid;
526 sm->enable_poller = set_client_for_stat (stat, item, rp);
531 /**********************************
532 * ALL Interface Combined stats - to be deprecated
533 **********************************/
536 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/* Handler for WANT_INTERFACE_COMBINED_STATS: subscribes the client to
 * combined counters for ALL interfaces by registering the ~0 wildcard on
 * the per-interface path, then replies with the outcome. */
539 vl_api_want_interface_combined_stats_t_handler
540 (vl_api_want_interface_combined_stats_t * mp)
542 stats_main_t *sm = &stats_main;
543 vpe_client_registration_t rp;
544 vl_api_want_interface_combined_stats_reply_t *rmp;
547 vl_api_registration_t *reg;
550 swif = ~0; //Using same mechanism as _per_interface_
551 rp.client_index = mp->client_index;
552 rp.client_pid = mp->pid;
554 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
/* If the client has vanished, undo the registration so the poller does
 * not keep servicing a dead client. */
558 reg = vl_api_client_index_to_registration (mp->client_index);
562 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
/* Send the reply back to the requesting client. */
567 rmp = vl_msg_api_alloc (sizeof (*rmp));
568 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
569 rmp->context = mp->context;
570 rmp->retval = retval;
572 vl_api_send_msg (reg, (u8 *) rmp);
/* Handler run on the main thread when a combined-counters message arrives
 * from the stats thread: fan it out to every registered client.  The
 * message is re-used for the last client and copied for the others, so
 * exactly one free/send consumes the original. */
576 vl_api_vnet_interface_combined_counters_t_handler
577 (vl_api_vnet_interface_combined_counters_t * mp)
579 vpe_client_registration_t *clients, client;
580 stats_main_t *sm = &stats_main;
581 svm_queue_t *q, *q_prev = NULL;
582 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
/* Total wire size: fixed header plus one vlib_counter_t per entry. */
586 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
589 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
590 ~0 /*flag for all */ );
592 for (i = 0; i < vec_len (clients); i++)
595 q = vl_api_client_index_to_input_queue (client.client_index);
/* A previous client still needs the message: send it a copy and keep
 * the original for the next recipient.  Queue-full clients are skipped
 * (cursize/maxsize check) rather than blocked on. */
598 if (q_prev && (q_prev->cursize < q_prev->maxsize))
600 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
601 clib_memcpy (mp_copy, mp, mp_size);
602 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
/* Debug trace of the counters being distributed. */
609 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
/* Last client gets the original message... */
612 if (q_prev && (q_prev->cursize < q_prev->maxsize))
614 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
/* ...otherwise nobody consumed it: free it here. */
618 vl_msg_api_free (mp);
/* Collect every combined (packets+bytes) interface counter and ship them
 * to the main thread's API input queue in COMBINED_COUNTER_BATCH_SIZE
 * batches; mirrors do_simple_interface_counters(). */
623 do_combined_interface_counters (stats_main_t * sm)
625 vl_api_vnet_interface_combined_counters_t *mp = 0;
626 vnet_interface_main_t *im = sm->interface_main;
627 api_main_t *am = sm->api_main;
628 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
629 svm_queue_t *q = shmem_hdr->vl_input_queue;
630 vlib_combined_counter_main_t *cm;
631 u32 items_this_message = 0;
632 vlib_counter_t v, *vp = 0;
/* Hold the counter lock so interface registration cannot move vectors. */
635 vnet_interface_counter_lock (im);
637 vec_foreach (cm, im->combined_sw_if_counters)
639 n_counts = vlib_combined_counter_n_counters (cm);
640 for (i = 0; i < n_counts; i++)
/* Start a new batch sized to the limit or the remainder. */
644 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
647 mp = vl_msg_api_alloc_as_if_client
648 (sizeof (*mp) + items_this_message * sizeof (v));
649 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
650 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
651 mp->first_sw_if_index = htonl (i);
653 vp = (vlib_counter_t *) mp->data;
/* Append this interface's packets/bytes in network byte order. */
655 vlib_get_combined_counter (cm, i, &v);
656 clib_mem_unaligned (&vp->packets, u64)
657 = clib_host_to_net_u64 (v.packets);
658 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
/* Batch full: finalise count and enqueue to the main thread. */
661 if (mp->count == items_this_message)
663 mp->count = htonl (items_this_message);
664 /* Send to the main thread... */
665 vl_msg_api_send_shmem (q, (u8 *) & mp);
671 vnet_interface_counter_unlock (im);
674 /**********************************
675 * Per Interface Combined stats
676 **********************************/
678 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_COMBINED_STATS: validates every requested
 * sw_if_index, then registers (or clears) the client for each one and
 * replies with the aggregate retval. */
680 vl_api_want_per_interface_combined_stats_t_handler
681 (vl_api_want_per_interface_combined_stats_t * mp)
683 stats_main_t *sm = &stats_main;
684 vpe_client_registration_t rp;
685 vl_api_want_per_interface_combined_stats_reply_t *rmp;
686 vlib_combined_counter_main_t *cm;
689 vl_api_registration_t *reg;
693 // Validate we have good sw_if_indexes before registering
694 for (i = 0; i < mp->num; i++)
696 swif = mp->sw_ifs[i];
698 /* Check its a real sw_if_index that the client is allowed to see */
701 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
703 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indexes valid: register the client for each requested interface. */
709 for (i = 0; i < mp->num; i++)
711 swif = mp->sw_ifs[i];
713 rp.client_index = mp->client_index;
714 rp.client_pid = mp->pid;
715 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
716 swif, mp->enable_disable);
/* Client gone? Roll back its registrations so the poller stays clean. */
720 reg = vl_api_client_index_to_registration (mp->client_index);
723 for (i = 0; i < mp->num; i++)
725 swif = mp->sw_ifs[i];
727 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
/* Reply with the outcome. */
733 rmp = vl_msg_api_alloc (sizeof (*rmp));
734 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
735 rmp->context = mp->context;
736 rmp->retval = retval;
738 vl_api_send_msg (reg, (u8 *) rmp);
741 /* Per Interface Combined distribution to client */
/* Poller: for every (interface, client) combined-counter registration,
 * build a one-entry VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS message
 * containing RX and TX packets/bytes plus a collection timestamp, and send
 * it directly to each registered client.  The ~0 wildcard registration is
 * delegated to do_combined_interface_counters(). */
743 do_combined_per_interface_counters (stats_main_t * sm)
745 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
746 vnet_interface_main_t *im = sm->interface_main;
747 api_main_t *am = sm->api_main;
748 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
749 vl_api_registration_t *vl_reg;
750 vlib_combined_counter_main_t *cm;
752 * items_this_message will eventually be used to optimise the batching
753 * of per client messages for each stat. For now setting this to 1 then
754 * iterate. This will not affect API.
756 * FIXME instead of enqueueing here, this should be sent to a batch
757 * storer for per-client transmission. Each "mp" sent would be a single entry
758 * and if a client is listening to other sw_if_indexes for same, it would be
759 * appended to that *mp
761 u32 items_this_message = 1;
762 vnet_combined_counter_t *vp = 0;
766 vpe_client_stats_registration_t *reg;
767 vpe_client_registration_t *client;
768 u32 *sw_if_index = 0;
772 - capturing the timestamp of the counters "when VPP knew them" is important.
773 Less so is that the timing of the delivery to the control plane be in the same
776 i.e. As long as the control plane can delta messages from VPP and work out
777 velocity etc based on the timestamp, it can do so in a more "batch mode".
779 It would be beneficial to keep a "per-client" message queue, and then
780 batch all the stat messages for a client into one message, with
783 Given this particular API is for "per interface" one assumes that the scale
784 is less than the ~0 case, which the prior API is suited for.
786 vnet_interface_counter_lock (im);
/* Single timestamp for the whole collection pass. */
788 timestamp = vlib_time_now (sm->vlib_main);
/* Snapshot registrations first: iterating the pool while sending could
 * race with clear_client_for_stat() below. */
790 vec_reset_length (sm->regs_tmp);
794 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
796 vec_add1 (sm->regs_tmp, reg);
800 for (i = 0; i < vec_len (sm->regs_tmp); i++)
802 reg = sm->regs_tmp[i];
/* Wildcard (~0) item: fall back to the all-interfaces bulk path,
 * dropping the lock around it. */
805 vnet_interface_counter_unlock (im);
806 do_combined_interface_counters (sm);
807 vnet_interface_counter_lock (im);
/* Snapshot this item's clients for the same reason as above. */
810 vec_reset_length (sm->clients_tmp);
813 pool_foreach (client, reg->clients, ({
814 vec_add1 (sm->clients_tmp, client);
818 //FIXME - should be doing non-variant part of mp here and managing
819 // any alloc per client in that vec_foreach
820 for (j = 0; j < vec_len (sm->clients_tmp); j++)
822 client = sm->clients_tmp[j];
824 vl_reg = vl_api_client_index_to_registration (client->client_index);
826 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
830 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
831 reg->item, client->client_index);
/* One message per (interface, client); single entry for now. */
835 mp = vl_msg_api_alloc (sizeof (*mp) +
836 (items_this_message *
837 (sizeof (*vp) /* rx */ )));
839 // FIXME when optimising for items_this_message > 1 need to include a
840 // SIMPLE_INTERFACE_BATCH_SIZE check.
842 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
844 mp->count = items_this_message;
845 mp->timestamp = timestamp;
846 vp = (vnet_combined_counter_t *) mp->data;
848 vp->sw_if_index = htonl (reg->item);
/* RX vlib_counter_t packets/bytes, network byte order. */
850 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
851 vlib_get_combined_counter (cm, reg->item, &v);
852 clib_mem_unaligned (&vp->rx_packets, u64)
853 = clib_host_to_net_u64 (v.packets);
854 clib_mem_unaligned (&vp->rx_bytes, u64) =
855 clib_host_to_net_u64 (v.bytes);
858 /* TX vlib_counter_t packets/bytes */
859 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
860 vlib_get_combined_counter (cm, reg->item, &v);
861 clib_mem_unaligned (&vp->tx_packets, u64)
862 = clib_host_to_net_u64 (v.packets);
863 clib_mem_unaligned (&vp->tx_bytes, u64) =
864 clib_host_to_net_u64 (v.bytes);
866 vl_api_send_msg (vl_reg, (u8 *) mp);
870 vnet_interface_counter_unlock (im);
873 /**********************************
874 * Per Interface simple stats
875 **********************************/
877 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_SIMPLE_STATS: validates the requested
 * sw_if_indexes, registers/clears the client per interface, and replies.
 * Parallel to the per-interface combined handler above. */
879 vl_api_want_per_interface_simple_stats_t_handler
880 (vl_api_want_per_interface_simple_stats_t * mp)
882 stats_main_t *sm = &stats_main;
883 vpe_client_registration_t rp;
884 vl_api_want_per_interface_simple_stats_reply_t *rmp;
885 vlib_simple_counter_main_t *cm;
888 vl_api_registration_t *reg;
/* Validate all requested indexes before touching registrations. */
892 for (i = 0; i < mp->num; i++)
894 swif = mp->sw_ifs[i];
896 /* Check its a real sw_if_index that the client is allowed to see */
899 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
901 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* Register the client for each requested interface. */
907 for (i = 0; i < mp->num; i++)
909 swif = mp->sw_ifs[i];
911 rp.client_index = mp->client_index;
912 rp.client_pid = mp->pid;
913 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
914 swif, mp->enable_disable);
918 reg = vl_api_client_index_to_registration (mp->client_index);
920 /* Client may have disconnected abruptly, clean up */
923 for (i = 0; i < mp->num; i++)
925 swif = mp->sw_ifs[i];
927 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
/* Reply with the outcome. */
935 rmp = vl_msg_api_alloc (sizeof (*rmp));
936 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
937 rmp->context = mp->context;
938 rmp->retval = retval;
940 vl_api_send_msg (reg, (u8 *) rmp);
943 /* Per Interface Simple distribution to client */
/* Poller: for every (interface, client) simple-counter registration, build
 * a one-entry VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS message carrying
 * all the simple counter types (drop, punt, ip4/6, rx-no-buf, rx-miss,
 * rx/tx-error, mpls) plus a timestamp, and send it to each client.  The ~0
 * wildcard registration delegates to do_simple_interface_counters(). */
945 do_simple_per_interface_counters (stats_main_t * sm)
947 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
948 vnet_interface_main_t *im = sm->interface_main;
949 api_main_t *am = sm->api_main;
950 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
951 vl_api_registration_t *vl_reg;
952 vlib_simple_counter_main_t *cm;
954 * items_this_message will eventually be used to optimise the batching
955 * of per client messages for each stat. For now setting this to 1 then
956 * iterate. This will not affect API.
958 * FIXME instead of enqueueing here, this should be sent to a batch
959 * storer for per-client transmission. Each "mp" sent would be a single entry
960 * and if a client is listening to other sw_if_indexes for same, it would be
961 * appended to that *mp
963 u32 items_this_message = 1;
965 vpe_client_stats_registration_t *reg;
966 vpe_client_registration_t *client;
969 vnet_simple_counter_t *vp = 0;
974 - capturing the timestamp of the counters "when VPP knew them" is important.
975 Less so is that the timing of the delivery to the control plane be in the same
978 i.e. As long as the control plane can delta messages from VPP and work out
979 velocity etc based on the timestamp, it can do so in a more "batch mode".
981 It would be beneficial to keep a "per-client" message queue, and then
982 batch all the stat messages for a client into one message, with
985 Given this particular API is for "per interface" one assumes that the scale
986 is less than the ~0 case, which the prior API is suited for.
988 vnet_interface_counter_lock (im);
/* Single timestamp for the whole collection pass. */
990 timestamp = vlib_time_now (sm->vlib_main);
/* Snapshot registrations so sends/cleanup below can't race pool walks. */
992 vec_reset_length (sm->regs_tmp);
996 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], ({
997 vec_add1 (sm->regs_tmp, reg);
1001 for (i = 0; i < vec_len (sm->regs_tmp); i++)
1003 reg = sm->regs_tmp[i];
/* Wildcard: bulk path for all interfaces, lock dropped around it. */
1004 if (reg->item == ~0)
1006 vnet_interface_counter_unlock (im);
1007 do_simple_interface_counters (sm);
1008 vnet_interface_counter_lock (im);
/* Snapshot this item's clients as well. */
1011 vec_reset_length (sm->clients_tmp);
1014 pool_foreach (client, reg->clients, ({
1015 vec_add1 (sm->clients_tmp, client);
1019 //FIXME - should be doing non-variant part of mp here and managing
1020 // any alloc per client in that vec_foreach
1021 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1023 client = sm->clients_tmp[j];
1024 vl_reg = vl_api_client_index_to_registration (client->client_index);
1026 /* Client may have disconnected abrubtly, clean up */
1030 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1031 reg->item, client->client_index);
/* 10 u64 fields per entry (sw_if_index padding + 9 counters). */
1035 size = (sizeof (*mp) + (items_this_message * (sizeof (u64) * 10)));
1036 mp = vl_msg_api_alloc (size);
1037 // FIXME when optimising for items_this_message > 1 need to include a
1038 // SIMPLE_INTERFACE_BATCH_SIZE check.
1039 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1041 mp->count = items_this_message;
1042 mp->timestamp = timestamp;
1043 vp = (vnet_simple_counter_t *) mp->data;
1045 vp->sw_if_index = htonl (reg->item);
1047 //FIXME will be simpler with a preprocessor macro
1048 // VNET_INTERFACE_COUNTER_DROP
1049 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1050 v = vlib_get_simple_counter (cm, reg->item);
1051 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1053 // VNET_INTERFACE_COUNTER_PUNT
1054 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1055 v = vlib_get_simple_counter (cm, reg->item);
1056 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1058 // VNET_INTERFACE_COUNTER_IP4
1059 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1060 v = vlib_get_simple_counter (cm, reg->item);
1061 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1063 //VNET_INTERFACE_COUNTER_IP6
1064 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1065 v = vlib_get_simple_counter (cm, reg->item);
1066 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1068 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1069 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1070 v = vlib_get_simple_counter (cm, reg->item);
1071 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1072 clib_host_to_net_u64 (v);
1074 //VNET_INTERFACE_COUNTER_RX_MISS
1075 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1076 v = vlib_get_simple_counter (cm, reg->item);
1077 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1079 //VNET_INTERFACE_COUNTER_RX_ERROR
1080 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1081 v = vlib_get_simple_counter (cm, reg->item);
1082 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1084 //VNET_INTERFACE_COUNTER_TX_ERROR
1085 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1086 v = vlib_get_simple_counter (cm, reg->item);
1087 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1089 //VNET_INTERFACE_COUNTER_MPLS
1090 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1091 v = vlib_get_simple_counter (cm, reg->item);
1092 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1094 vl_api_send_msg (vl_reg, (u8 *) mp);
1098 vnet_interface_counter_unlock (im);
1101 /**********************************
1103 **********************************/
/* Sleep for sec seconds + nsec nanoseconds via nanosleep(); used to back
 * off when a client queue is full.  The elided lines presumably retry on
 * EINTR using the remaining time - confirm against the full source. */
1106 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1108 struct timespec _req, *req = &_req;
1109 struct timespec _rem, *rem = &_rem;
1112 req->tv_nsec = nsec;
1115 if (nanosleep (req, rem) == 0)
/* nanosleep failed for a reason other than interruption: log it. */
1120 clib_unix_warning ("nanosleep");
1126 * @brief The context passed when collecting adjacency counters
1128 typedef struct ip4_nbr_stats_ctx_t_
1131 * The SW IF index all these adjs belong to
1136 * A vector of ip4 nbr counters
/* Accumulated per-adjacency counters for the current interface walk;
 * drained in batches by ip4_nbr_ship(). */
1138 vl_api_ip4_nbr_counter_t *counters;
1139 } ip4_nbr_stats_ctx_t;
/* adj_nbr_walk() callback: append the adjacency's combined counter to the
 * walk context, skipping zero-packet adjacencies.  Always continues the
 * walk.  Counter values are stored in network byte order. */
1141 static adj_walk_rc_t
1142 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1144 vl_api_ip4_nbr_counter_t *vl_counter;
1145 vlib_counter_t adj_counter;
1146 ip4_nbr_stats_ctx_t *ctx;
1147 ip_adjacency_t *adj;
1150 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Only report adjacencies that have actually seen traffic. */
1152 if (0 != adj_counter.packets)
1154 vec_add2 (ctx->counters, vl_counter, 1);
1157 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1158 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
/* IPv4 next-hop is already in network byte order in the adj. */
1159 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1160 vl_counter->link_type = adj->ia_link;
1162 return (ADJ_WALK_RC_CONTINUE);
/* NOTE(review): classic double-evaluation macro - fine here since args
 * are side-effect free. */
1165 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain the walk context's ip4 neighbour counters to the main thread in
 * IP4_FIB_COUNTER_BATCH_SIZE chunks.  When the shared-memory queue is
 * full, sleeps STATS_RELEASE_DELAY_NS between batches so the consumer can
 * catch up. */
1168 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1170 api_main_t *am = sm->api_main;
1171 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1172 svm_queue_t *q = shmem_hdr->vl_input_queue;
1173 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1177 * If the walk context has counters, which may be left over from the last
1178 * suspend, then we continue from there.
1180 while (0 != vec_len (ctx->counters))
1182 u32 n_items = MIN (vec_len (ctx->counters),
1183 IP4_FIB_COUNTER_BATCH_SIZE);
1186 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1188 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1191 (vl_api_ip4_nbr_counter_t)));
1192 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
/* ntohl == htonl for these fixed-width swaps; fields go out big-endian. */
1193 mp->count = ntohl (n_items);
1194 mp->sw_if_index = ntohl (ctx->sw_if_index);
1199 * copy the counters from the back of the context, then we can easily
1200 * 'erase' them by resetting the vector length.
1201 * The order we push the stats to the caller is not important.
1204 &ctx->counters[vec_len (ctx->counters) - n_items],
1205 n_items * sizeof (*ctx->counters));
/* Shrink the vector in place: the copied tail is now "sent". */
1207 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Note queue fullness before sending so we know whether to back off. */
1213 pause = svm_queue_is_full (q);
1215 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1216 svm_queue_unlock (q);
1220 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1221 STATS_RELEASE_DELAY_NS);
/* Walk every software interface, collect its ip4 neighbour-adjacency
 * counters under the stats lock, and ship any non-empty result to the
 * main thread via ip4_nbr_ship(). */
1226 do_ip4_nbr_counters (stats_main_t * sm)
1228 vnet_main_t *vnm = vnet_get_main ();
1229 vnet_interface_main_t *im = &vnm->interface_main;
1230 vnet_sw_interface_t *si;
/* Walk context; counters vector starts empty. */
1232 ip4_nbr_stats_ctx_t ctx = {
1238 pool_foreach (si, im->sw_interfaces,
1241 * update the interface we are now concerned with
1243 ctx.sw_if_index = si->sw_if_index;
1246 * we are about to walk another interface, so we shouldn't have any pending
1249 ASSERT(ctx.counters == NULL);
1252 * visit each neighbour adjacency on the interface and collect
1253 * its current stats.
1254 * Because we hold the lock the walk is synchronous, so safe to routing
1255 * updates. It's limited in work by the number of adjacenies on an
1256 * interface, which is typically not huge.
1258 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1259 adj_nbr_walk (si->sw_if_index,
1266 * if this interface has some adjacencies with counters then ship them,
1267 * else continue to the next interface.
1269 if (NULL != ctx.counters)
1271 ip4_nbr_ship(sm, &ctx);
1278 * @brief The context passed when collecting adjacency counters
1280 typedef struct ip6_nbr_stats_ctx_t_
1283 * The SW IF index all these adjs belong to
1288 * A vector of ip6 nbr counters
/* Accumulated per-adjacency counters for the current interface walk;
 * drained in batches by ip6_nbr_ship(). */
1290 vl_api_ip6_nbr_counter_t *counters;
1291 } ip6_nbr_stats_ctx_t;
/* adj_nbr_walk() callback (ip6 variant): append the adjacency's combined
 * counter to the walk context, skipping zero-packet adjacencies.  Always
 * continues the walk. */
1293 static adj_walk_rc_t
1294 ip6_nbr_stats_cb (adj_index_t ai,
1297 vl_api_ip6_nbr_counter_t *vl_counter;
1298 vlib_counter_t adj_counter;
1299 ip6_nbr_stats_ctx_t *ctx;
1300 ip_adjacency_t *adj;
1303 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* Only report adjacencies that have actually seen traffic. */
1305 if (0 != adj_counter.packets)
1307 vec_add2(ctx->counters, vl_counter, 1);
1310 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1311 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
/* 128-bit IPv6 next-hop copied as two u64 halves, already big-endian. */
1312 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1313 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1314 vl_counter->link_type = adj->ia_link;
1316 return (ADJ_WALK_RC_CONTINUE);
/* Identical redefinition of MIN (harmless per C preprocessor rules). */
1319 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain the walk context's ip6 neighbour counters to the main thread in
 * IP6_FIB_COUNTER_BATCH_SIZE chunks; mirrors ip4_nbr_ship(), including
 * the STATS_RELEASE_DELAY_NS back-off when the queue is full. */
1322 ip6_nbr_ship (stats_main_t * sm,
1323 ip6_nbr_stats_ctx_t *ctx)
1325 api_main_t *am = sm->api_main;
1326 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1327 svm_queue_t *q = shmem_hdr->vl_input_queue;
1328 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1332 * If the walk context has counters, which may be left over from the last
1333 * suspend, then we continue from there.
1335 while (0 != vec_len(ctx->counters))
1337 u32 n_items = MIN (vec_len (ctx->counters),
1338 IP6_FIB_COUNTER_BATCH_SIZE);
1341 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1343 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1346 (vl_api_ip6_nbr_counter_t)));
1347 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
/* ntohl == htonl for these fixed-width swaps; fields go out big-endian. */
1348 mp->count = ntohl (n_items);
1349 mp->sw_if_index = ntohl (ctx->sw_if_index);
1354 * copy the counters from the back of the context, then we can easily
1355 * 'erase' them by resetting the vector length.
1356 * The order we push the stats to the caller is not important.
1359 &ctx->counters[vec_len (ctx->counters) - n_items],
1360 n_items * sizeof (*ctx->counters));
/* Shrink the vector in place: the copied tail is now "sent". */
1362 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Note queue fullness before sending so we know whether to back off. */
1368 pause = svm_queue_is_full (q);
1370 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1371 svm_queue_unlock (q);
1375 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1376 STATS_RELEASE_DELAY_NS);
/*
 * Collect and ship IPv6 neighbour-adjacency counters for every software
 * interface. For each interface: take the data-structure lock, walk its
 * neighbour adjacencies synchronously via adj_nbr_walk (collecting into a
 * stack-local ip6_nbr_stats_ctx_t), then ship any collected counters with
 * ip6_nbr_ship. The ASSERT documents the invariant that a previous
 * interface's counters were fully shipped before the next walk starts.
 */
1381 do_ip6_nbr_counters (stats_main_t * sm)
1383 vnet_main_t *vnm = vnet_get_main ();
1384 vnet_interface_main_t *im = &vnm->interface_main;
1385 vnet_sw_interface_t *si;
1387 ip6_nbr_stats_ctx_t ctx = {
1393 pool_foreach (si, im->sw_interfaces,
1396 * update the interface we are now concerned with
1398 ctx.sw_if_index = si->sw_if_index;
1401 * we are about to walk another interface, so we shouldn't have any pending
1404 ASSERT(ctx.counters == NULL);
1407 * visit each neighbour adjacency on the interface and collect
1408 * its current stats.
1409 * Because we hold the lock the walk is synchronous, so safe to routing
1410 * updates. It's limited in work by the number of adjacenies on an
1411 * interface, which is typically not huge.
1413 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1414 adj_nbr_walk (si->sw_if_index,
1421 * if this interface has some adjacencies with counters then ship them,
1422 * else continue to the next interface.
1424 if (NULL != ctx.counters)
1426 ip6_nbr_ship(sm, &ctx);
/*
 * Gather per-route IPv4 FIB counters for every FIB table and stream them
 * to the main-thread input queue in IP4_FIB_COUNTER_BATCH_SIZE batches.
 * Per table: snapshot the routes from the per-prefix-length hash tables
 * under the data-structure lock, then read each route's load-balance
 * combined counter and emit it only if packets were seen (values converted
 * to network byte order). Whenever the lock's release_hint is set, the
 * in-progress message is abandoned (ctrp reset to mp->c), the loop pauses
 * via ip46_fib_stats_delay, and the outer loop restarts at
 * start_at_fib_index. A final partial message is flushed per FIB; an
 * empty trailing message is freed instead.
 */
1433 do_ip4_fib_counters (stats_main_t * sm)
1435 ip4_main_t *im4 = &ip4_main;
1436 api_main_t *am = sm->api_main;
1437 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1438 svm_queue_t *q = shmem_hdr->vl_input_queue;
1442 do_ip46_fibs_t *do_fibs;
1443 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1444 u32 items_this_message;
1445 vl_api_ip4_fib_counter_t *ctrp = 0;
1446 u32 start_at_fib_index = 0;
1449 do_fibs = &sm->do_ip46_fibs;
1452 vec_reset_length (do_fibs->fibs);
1454 pool_foreach (fib, im4->fibs,
1455 ({vec_add1(do_fibs->fibs,fib);}));
1459 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1461 fib = do_fibs->fibs[j];
1462 /* We may have bailed out due to control-plane activity */
1463 while ((fib - im4->fibs) < start_at_fib_index)
1466 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1470 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1471 mp = vl_msg_api_alloc_as_if_client
1473 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1474 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1476 mp->vrf_id = ntohl (fib->ft_table_id);
1477 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1481 /* happens if the last FIB was empty... */
1482 ASSERT (mp->count == 0);
1483 mp->vrf_id = ntohl (fib->ft_table_id);
1486 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1488 vec_reset_length (do_fibs->ip4routes);
1489 vec_reset_length (do_fibs->results);
1491 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1493 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1497 vec_reset_length (do_fibs->pvec);
1499 x.address_length = i;
1501 hash_foreach_pair (p, hash, (
1503 vec_add1 (do_fibs->pvec, p);}
1505 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1507 p = do_fibs->pvec[k];
1508 x.address.data_u32 = p->key;
1509 x.index = p->value[0];
1511 vec_add1 (do_fibs->ip4routes, x);
1512 if (sm->data_structure_lock->release_hint)
1514 start_at_fib_index = fib - im4->fibs;
1516 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1517 STATS_RELEASE_DELAY_NS);
1519 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1525 vec_foreach (r, do_fibs->ip4routes)
1528 const dpo_id_t *dpo_id;
1531 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1532 index = (u32) dpo_id->dpoi_index;
1534 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1537 * If it has actually
1538 * seen at least one packet, send it.
1543 /* already in net byte order */
1544 ctrp->address = r->address.as_u32;
1545 ctrp->address_length = r->address_length;
1546 ctrp->packets = clib_host_to_net_u64 (c.packets);
1547 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1551 if (mp->count == items_this_message)
1553 mp->count = htonl (items_this_message);
1555 * If the main thread's input queue is stuffed,
1556 * drop the data structure lock (which the main thread
1557 * may want), and take a pause.
1560 if (svm_queue_is_full (q))
1563 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1564 svm_queue_unlock (q);
1566 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1567 STATS_RELEASE_DELAY_NS);
1570 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1571 svm_queue_unlock (q);
1573 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1574 mp = vl_msg_api_alloc_as_if_client
1576 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1577 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1579 mp->vrf_id = ntohl (fib->ft_table_id);
1580 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1582 } /* for each (mp or single) adj */
1583 if (sm->data_structure_lock->release_hint)
1585 start_at_fib_index = fib - im4->fibs;
1587 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1589 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1592 } /* vec_foreach (routes) */
1596 /* Flush any data from this fib */
1599 mp->count = htonl (mp->count);
1600 vl_msg_api_send_shmem (q, (u8 *) & mp);
1605 /* If e.g. the last FIB had no reportable routes, free the buffer */
1607 vl_msg_api_free (mp);
/*
 * mfib_table_walk callback: record the walked entry's prefix in
 * sm->do_ip46_fibs.mroutes so the caller can re-look-up and read counters
 * after the walk (the collector suspends, so entry indices may go stale).
 * @ctx is the stats_main_t, passed by do_ip4/ip6_mfib_counters.
 */
1611 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1613 stats_main_t *sm = ctx;
1614 do_ip46_fibs_t *do_fibs;
1615 mfib_entry_t *entry;
1617 do_fibs = &sm->do_ip46_fibs;
1618 entry = mfib_entry_get (fei);
1620 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/*
 * Gather per-(S,G) IPv4 multicast FIB counters for every mfib table and
 * stream them to the main-thread queue in IP4_MFIB_COUNTER_BATCH_SIZE
 * batches. The prefix list is captured under the data-structure lock via
 * mfib_table_stats_walk_cb; each prefix is then re-looked-up (the collector
 * can suspend, invalidating entry indices) and its replicate counter is
 * emitted only if packets were seen. Full batches block while the queue is
 * full, then are sent; a final partial message is flushed per mfib, and an
 * empty trailing message is freed.
 */
1626 do_ip4_mfib_counters (stats_main_t * sm)
1628 ip4_main_t *im4 = &ip4_main;
1629 api_main_t *am = sm->api_main;
1630 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1631 svm_queue_t *q = shmem_hdr->vl_input_queue;
1634 do_ip46_fibs_t *do_fibs;
1635 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1636 u32 items_this_message;
1637 vl_api_ip4_mfib_counter_t *ctrp = 0;
1638 u32 start_at_mfib_index = 0;
1641 do_fibs = &sm->do_ip46_fibs;
1643 vec_reset_length (do_fibs->mfibs);
1645 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1648 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1650 mfib = do_fibs->mfibs[j];
1651 /* We may have bailed out due to control-plane activity */
1652 while ((mfib - im4->mfibs) < start_at_mfib_index)
1657 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1658 mp = vl_msg_api_alloc_as_if_client
1660 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1661 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1663 mp->vrf_id = ntohl (mfib->mft_table_id);
1664 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1668 /* happens if the last MFIB was empty... */
1669 ASSERT (mp->count == 0);
1670 mp->vrf_id = ntohl (mfib->mft_table_id);
1673 vec_reset_length (do_fibs->mroutes);
1676 * walk the table with table updates blocked
1678 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1680 mfib_table_walk (mfib->mft_index,
1681 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1684 vec_foreach (pfx, do_fibs->mroutes)
1686 const dpo_id_t *dpo_id;
1687 fib_node_index_t mfei;
1692 * re-lookup the entry, since we suspend during the collection
1694 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1696 if (FIB_NODE_INDEX_INVALID == mfei)
1699 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1700 index = (u32) dpo_id->dpoi_index;
1702 vlib_get_combined_counter (&replicate_main.repm_counters,
1703 dpo_id->dpoi_index, &c);
1705 * If it has seen at least one packet, send it.
1709 /* already in net byte order */
1710 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1711 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1712 ctrp->group_length = pfx->fp_len;
1713 ctrp->packets = clib_host_to_net_u64 (c.packets);
1714 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1718 if (mp->count == items_this_message)
1720 mp->count = htonl (items_this_message);
1722 * If the main thread's input queue is stuffed,
1723 * drop the data structure lock (which the main thread
1724 * may want), and take a pause.
1728 while (svm_queue_is_full (q))
1730 svm_queue_unlock (q);
1731 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1732 STATS_RELEASE_DELAY_NS);
1735 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1736 svm_queue_unlock (q);
1738 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1739 mp = vl_msg_api_alloc_as_if_client
1741 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1742 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1744 mp->vrf_id = ntohl (mfib->mft_table_id);
1745 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1750 /* Flush any data from this mfib */
1753 mp->count = htonl (mp->count);
1754 vl_msg_api_send_shmem (q, (u8 *) & mp);
1759 /* If e.g. the last FIB had no reportable routes, free the buffer */
1761 vl_msg_api_free (mp);
/*
 * IPv6 twin of do_ip4_mfib_counters: capture each mfib's prefixes under
 * the data-structure lock, re-look-up each prefix (the collector can
 * suspend), read its replicate counter, and stream non-zero entries to the
 * main-thread queue in IP6_MFIB_COUNTER_BATCH_SIZE batches. Addresses are
 * copied as 16-byte ip6 values; the only other differences from the ip4
 * variant are the message/table types.
 */
1765 do_ip6_mfib_counters (stats_main_t * sm)
1767 ip6_main_t *im6 = &ip6_main;
1768 api_main_t *am = sm->api_main;
1769 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1770 svm_queue_t *q = shmem_hdr->vl_input_queue;
1773 do_ip46_fibs_t *do_fibs;
1774 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1775 u32 items_this_message;
1776 vl_api_ip6_mfib_counter_t *ctrp = 0;
1777 u32 start_at_mfib_index = 0;
1780 do_fibs = &sm->do_ip46_fibs;
1782 vec_reset_length (do_fibs->mfibs);
1784 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1787 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1789 mfib = do_fibs->mfibs[j];
1790 /* We may have bailed out due to control-plane activity */
1791 while ((mfib - im6->mfibs) < start_at_mfib_index)
1796 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1797 mp = vl_msg_api_alloc_as_if_client
1799 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1800 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1802 mp->vrf_id = ntohl (mfib->mft_table_id);
1803 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1807 /* happens if the last MFIB was empty... */
1808 ASSERT (mp->count == 0);
1809 mp->vrf_id = ntohl (mfib->mft_table_id);
1812 vec_reset_length (do_fibs->mroutes);
1815 * walk the table with table updates blocked
1817 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1819 mfib_table_walk (mfib->mft_index,
1820 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1823 vec_foreach (pfx, do_fibs->mroutes)
1825 const dpo_id_t *dpo_id;
1826 fib_node_index_t mfei;
1831 * re-lookup the entry, since we suspend during the collection
1833 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1835 if (FIB_NODE_INDEX_INVALID == mfei)
1838 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1839 index = (u32) dpo_id->dpoi_index;
1841 vlib_get_combined_counter (&replicate_main.repm_counters,
1842 dpo_id->dpoi_index, &c);
1844 * If it has seen at least one packet, send it.
1848 /* already in net byte order */
1849 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1850 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1851 ctrp->group_length = pfx->fp_len;
1852 ctrp->packets = clib_host_to_net_u64 (c.packets);
1853 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1857 if (mp->count == items_this_message)
1859 mp->count = htonl (items_this_message);
1861 * If the main thread's input queue is stuffed,
1862 * drop the data structure lock (which the main thread
1863 * may want), and take a pause.
1867 while (svm_queue_is_full (q))
1869 svm_queue_unlock (q);
1870 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1871 STATS_RELEASE_DELAY_NS);
1874 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1875 svm_queue_unlock (q);
1877 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1878 mp = vl_msg_api_alloc_as_if_client
1880 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1881 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1883 mp->vrf_id = ntohl (mfib->mft_table_id);
1884 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1889 /* Flush any data from this mfib */
1892 mp->count = htonl (mp->count);
1893 vl_msg_api_send_shmem (q, (u8 *) & mp);
1898 /* If e.g. the last FIB had no reportable routes, free the buffer */
1900 vl_msg_api_free (mp);
/*
 * Argument bundle for the add_routes_in_fib bihash walk below.
 * NOTE(review): the typedef's opening line and the sm/fib_index members
 * (both read in add_routes_in_fib) are not visible here — confirm the
 * full struct against upstream.
 */
1906 ip6_route_t **routep;
1908 } add_routes_in_fib_arg_t;
/*
 * Bihash walk callback for the ip6 forwarding table: if this key/value
 * pair belongs to the FIB index in @arg, append the route (address,
 * prefix length from the low byte of key[2], fib-entry index) to the
 * caller's route vector. If the control plane has asked for the lock
 * (release_hint), abandon the walk immediately via clib_longjmp back to
 * do_ip6_fib_counters' clib_setjmp.
 */
1911 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1913 add_routes_in_fib_arg_t *ap = arg;
1914 stats_main_t *sm = ap->sm;
1916 if (sm->data_structure_lock->release_hint)
1917 clib_longjmp (&sm->jmp_buf, 1);
1919 if (kvp->key[2] >> 32 == ap->fib_index)
1921 ip6_address_t *addr;
1923 addr = (ip6_address_t *) kvp;
1924 vec_add2 (*ap->routep, r, 1);
1925 r->address = addr[0];
1926 r->address_length = kvp->key[2] & 0xFF;
1927 r->index = kvp->value;
/*
 * Gather per-route IPv6 FIB counters for every FIB table and stream them
 * to the main-thread queue in IP6_FIB_COUNTER_BATCH_SIZE batches. The
 * route snapshot is taken by walking the forwarding bihash under the
 * data-structure lock; add_routes_in_fib longjmps back here (the
 * clib_setjmp below) when the control plane wants the lock, in which case
 * the collector pauses and restarts the table. As in the ip4 variant, a
 * route is emitted only when its load-balance counter has seen packets;
 * a final partial message is flushed per FIB and an empty trailing
 * message is freed.
 */
1932 do_ip6_fib_counters (stats_main_t * sm)
1934 ip6_main_t *im6 = &ip6_main;
1935 api_main_t *am = sm->api_main;
1936 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1937 svm_queue_t *q = shmem_hdr->vl_input_queue;
1940 do_ip46_fibs_t *do_fibs;
1941 vl_api_vnet_ip6_fib_counters_t *mp = 0;
1942 u32 items_this_message;
1943 vl_api_ip6_fib_counter_t *ctrp = 0;
1944 u32 start_at_fib_index = 0;
1945 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1946 add_routes_in_fib_arg_t _a, *a = &_a;
1949 do_fibs = &sm->do_ip46_fibs;
1951 vec_reset_length (do_fibs->fibs);
1953 pool_foreach (fib, im6->fibs,
1954 ({vec_add1(do_fibs->fibs,fib);}));
1958 for (i = 0; i < vec_len (do_fibs->fibs); i++)
1960 fib = do_fibs->fibs[i];
1961 /* We may have bailed out due to control-plane activity */
1962 while ((fib - im6->fibs) < start_at_fib_index)
1967 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1968 mp = vl_msg_api_alloc_as_if_client
1970 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1971 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1973 mp->vrf_id = ntohl (fib->ft_table_id);
1974 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1977 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1979 vec_reset_length (do_fibs->ip6routes);
1980 vec_reset_length (do_fibs->results);
1982 a->fib_index = fib - im6->fibs;
1983 a->routep = &do_fibs->ip6routes;
1986 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1988 start_at_fib_index = fib - im6->fibs;
1989 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
1994 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1995 STATS_RELEASE_DELAY_NS);
1997 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2001 vec_foreach (r, do_fibs->ip6routes)
2005 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2008 * If it has actually
2009 * seen at least one packet, send it.
2013 /* already in net byte order */
2014 ctrp->address[0] = r->address.as_u64[0];
2015 ctrp->address[1] = r->address.as_u64[1];
2016 ctrp->address_length = (u8) r->address_length;
2017 ctrp->packets = clib_host_to_net_u64 (c.packets);
2018 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2022 if (mp->count == items_this_message)
2024 mp->count = htonl (items_this_message);
2026 * If the main thread's input queue is stuffed,
2027 * drop the data structure lock (which the main thread
2028 * may want), and take a pause.
2031 if (svm_queue_is_full (q))
2034 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2035 svm_queue_unlock (q);
2037 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2038 STATS_RELEASE_DELAY_NS);
2041 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2042 svm_queue_unlock (q);
2044 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2045 mp = vl_msg_api_alloc_as_if_client
2047 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2048 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2050 mp->vrf_id = ntohl (fib->ft_table_id);
2051 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2055 if (sm->data_structure_lock->release_hint)
2057 start_at_fib_index = fib - im6->fibs;
2059 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2061 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2064 } /* vec_foreach (routes) */
2068 /* Flush any data from this fib */
2071 mp->count = htonl (mp->count);
2072 vl_msg_api_send_shmem (q, (u8 *) & mp);
2077 /* If e.g. the last FIB had no reportable routes, free the buffer */
2079 vl_msg_api_free (mp);
/*
 * Entry point of the dedicated stats pthread. Blocks all signals, names
 * the thread "<prefix>_stats", switches to its own heap, then loops
 * forever: sleep ~10s, and if the poller is enabled run each collector
 * whose registration pool has at least one client. Never returns.
 */
2083 stats_thread_fn (void *arg)
2085 stats_main_t *sm = &stats_main;
2086 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2087 vlib_thread_main_t *tm = vlib_get_thread_main ();
2089 /* stats thread wants no signals. */
2093 pthread_sigmask (SIG_SETMASK, &s, 0);
2096 if (vec_len (tm->thread_prefix))
2097 vlib_set_thread_name ((char *)
2098 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2100 clib_mem_set_heap (w->thread_mheap);
2104 /* 10 second poll interval */
2105 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
2107 if (!(sm->enable_poller))
2112 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2113 do_combined_per_interface_counters (sm);
2116 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2117 do_simple_per_interface_counters (sm);
2119 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2120 do_ip4_fib_counters (sm);
2122 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2123 do_ip6_fib_counters (sm);
2125 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2126 do_ip4_mfib_counters (sm);
2128 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2129 do_ip6_mfib_counters (sm);
2131 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2132 do_ip4_nbr_counters (sm);
2134 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2135 do_ip6_nbr_counters (sm);
/*
 * Relay an interface-simple-counters message to every registered client.
 * The message is duplicated (alloc+memcpy) for all clients but the last,
 * which receives the original — avoiding one copy. Clients whose input
 * queue has vanished are deregistered. If a client's queue is full the
 * message is dropped (queue back-pressure). If no client could take it,
 * the original message is freed.
 * NOTE(review): the fformat() below dumps every message to stdout —
 * looks like leftover debug output; confirm whether it is guarded by an
 * elided condition upstream.
 */
2140 vl_api_vnet_interface_simple_counters_t_handler
2141 (vl_api_vnet_interface_simple_counters_t * mp)
2143 vpe_client_registration_t *clients, client;
2144 stats_main_t *sm = &stats_main;
2145 svm_queue_t *q, *q_prev = NULL;
2146 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2150 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2153 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2154 ~0 /*flag for all */ );
2156 for (i = 0; i < vec_len (clients); i++)
2158 client = clients[i];
2159 q = vl_api_client_index_to_input_queue (client.client_index);
2162 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2164 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2165 clib_memcpy (mp_copy, mp, mp_size);
2166 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2174 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2175 client.client_index);
2181 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2184 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2186 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2190 vl_msg_api_free (mp);
/*
 * Relay an ip4-fib-counters message to every registered client, using the
 * same copy-for-all-but-last fan-out pattern as the simple-counters
 * handler. Clients with a dead input queue are deregistered, updating
 * sm->enable_poller; the message is freed if nobody could take it.
 */
2195 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2197 stats_main_t *sm = &stats_main;
2198 svm_queue_t *q, *q_prev = NULL;
2199 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2201 vpe_client_registration_t *clients, client;
2204 mp_size = sizeof (*mp_copy) +
2205 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2208 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2210 for (i = 0; i < vec_len (clients); i++)
2212 client = clients[i];
2213 q = vl_api_client_index_to_input_queue (client.client_index);
2216 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2218 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2219 clib_memcpy (mp_copy, mp, mp_size);
2220 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2227 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2228 ~0, client.client_index);
2233 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2235 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2239 vl_msg_api_free (mp);
/*
 * Relay an ip4-nbr-counters message to every registered client — same
 * fan-out pattern as the ip4-fib handler (copy for all but the last
 * client, deregister dead queues, free if undeliverable).
 */
2244 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2246 stats_main_t *sm = &stats_main;
2247 svm_queue_t *q, *q_prev = NULL;
2248 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2250 vpe_client_registration_t *clients, client;
2253 mp_size = sizeof (*mp_copy) +
2254 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2257 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2259 for (i = 0; i < vec_len (clients); i++)
2261 client = clients[i];
2262 q = vl_api_client_index_to_input_queue (client.client_index);
2265 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2267 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2268 clib_memcpy (mp_copy, mp, mp_size);
2269 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2276 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2277 ~0, client.client_index);
2283 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2285 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2289 vl_msg_api_free (mp);
/*
 * Relay an ip6-fib-counters message to every registered client — same
 * fan-out pattern as the ip4-fib handler.
 */
2294 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2296 stats_main_t *sm = &stats_main;
2297 svm_queue_t *q, *q_prev = NULL;
2298 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2300 vpe_client_registration_t *clients, client;
2303 mp_size = sizeof (*mp_copy) +
2304 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2307 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2309 for (i = 0; i < vec_len (clients); i++)
2311 client = clients[i];
2312 q = vl_api_client_index_to_input_queue (client.client_index);
2315 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2317 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2318 clib_memcpy (mp_copy, mp, mp_size);
2319 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2326 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2327 ~0, client.client_index);
2332 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2334 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2338 vl_msg_api_free (mp);
/*
 * Relay an ip6-nbr-counters message to every registered client — same
 * fan-out pattern as the ip4-nbr handler.
 */
2343 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2345 stats_main_t *sm = &stats_main;
2346 svm_queue_t *q, *q_prev = NULL;
2347 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2349 vpe_client_registration_t *clients, client;
2352 mp_size = sizeof (*mp_copy) +
2353 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2356 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2358 for (i = 0; i < vec_len (clients); i++)
2360 client = clients[i];
2361 q = vl_api_client_index_to_input_queue (client.client_index);
2364 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2366 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2367 clib_memcpy (mp_copy, mp, mp_size);
2368 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2375 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2376 ~0, client.client_index);
2381 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2383 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2387 vl_msg_api_free (mp);
/*
 * WANT_STATS: legacy "subscribe to everything" request. Registers (or
 * deregisters, per mp->enable_disable) the client for simple/combined
 * interface counters and the ip4/ip6 fib and nbr counters, with item ~0
 * meaning "all items", then sends a WANT_STATS_REPLY.
 * (mfib counters are NOT covered by this message — clients must use the
 * dedicated want_ip4/ip6_mfib_stats requests below.)
 */
2392 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2394 stats_main_t *sm = &stats_main;
2395 vpe_client_registration_t rp;
2396 vl_api_want_stats_reply_t *rmp;
2400 vl_api_registration_t *reg;
2402 item = ~0; //"ALL THE THINGS IN THE THINGS
2403 rp.client_index = mp->client_index;
2404 rp.client_pid = mp->pid;
2406 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2407 item, mp->enable_disable);
2409 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2410 item, mp->enable_disable);
2412 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2413 item, mp->enable_disable);
2415 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2416 item, mp->enable_disable);
2418 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2419 item, mp->enable_disable);
2421 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2422 item, mp->enable_disable);
2425 reg = vl_api_client_index_to_registration (mp->client_index);
2429 rmp = vl_msg_api_alloc (sizeof (*rmp));
2430 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2431 rmp->context = mp->context;
2432 rmp->retval = retval;
2434 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Subscribe/unsubscribe the requesting client to per-interface simple
 * counters for all interfaces (swif ~0), then reply. If the client's
 * registration has gone away, its subscription is cleared instead.
 */
2438 vl_api_want_interface_simple_stats_t_handler
2439 (vl_api_want_interface_simple_stats_t * mp)
2441 stats_main_t *sm = &stats_main;
2442 vpe_client_registration_t rp;
2443 vl_api_want_interface_simple_stats_reply_t *rmp;
2447 vl_api_registration_t *reg;
2449 swif = ~0; //Using same mechanism as _per_interface_
2450 rp.client_index = mp->client_index;
2451 rp.client_pid = mp->pid;
2453 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2454 mp->enable_disable);
2457 reg = vl_api_client_index_to_registration (mp->client_index);
2462 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2467 rmp = vl_msg_api_alloc (sizeof (*rmp));
2468 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2469 rmp->context = mp->context;
2470 rmp->retval = retval;
2472 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Subscribe/unsubscribe the requesting client to ip4 FIB counters for all
 * FIBs (fib ~0), then reply. A vanished client registration clears the
 * subscription and updates sm->enable_poller.
 */
2477 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2479 stats_main_t *sm = &stats_main;
2480 vpe_client_registration_t rp;
2481 vl_api_want_ip4_fib_stats_reply_t *rmp;
2484 vl_api_registration_t *reg;
2487 fib = ~0; //Using same mechanism as _per_interface_
2488 rp.client_index = mp->client_index;
2489 rp.client_pid = mp->pid;
2491 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2492 mp->enable_disable);
2495 reg = vl_api_client_index_to_registration (mp->client_index);
2499 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2500 fib, mp->client_index);
2504 rmp = vl_msg_api_alloc (sizeof (*rmp));
2505 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2506 rmp->context = mp->context;
2507 rmp->retval = retval;
2509 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Subscribe/unsubscribe the requesting client to ip4 mfib counters for
 * all mfibs (mfib ~0), then reply.
 */
2513 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2515 stats_main_t *sm = &stats_main;
2516 vpe_client_registration_t rp;
2517 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2520 vl_api_registration_t *reg;
2523 mfib = ~0; //Using same mechanism as _per_interface_
2524 rp.client_index = mp->client_index;
2525 rp.client_pid = mp->pid;
2527 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2528 mp->enable_disable);
2531 reg = vl_api_client_index_to_registration (mp->client_index);
2534 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2535 mfib, mp->client_index);
2539 rmp = vl_msg_api_alloc (sizeof (*rmp));
2540 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2541 rmp->context = mp->context;
2542 rmp->retval = retval;
2544 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Subscribe/unsubscribe the requesting client to ip6 FIB counters for all
 * FIBs (fib ~0), then reply.
 * NOTE(review): rmp is declared as vl_api_want_ip4_fib_stats_reply_t —
 * the ip4 reply type. The correct msg id is sent and the reply layouts
 * are presumably identical, but the ip6 reply typedef would be the right
 * declaration; confirm/fix upstream.
 */
2548 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2550 stats_main_t *sm = &stats_main;
2551 vpe_client_registration_t rp;
2552 vl_api_want_ip4_fib_stats_reply_t *rmp;
2555 vl_api_registration_t *reg;
2558 fib = ~0; //Using same mechanism as _per_interface_
2559 rp.client_index = mp->client_index;
2560 rp.client_pid = mp->pid;
2562 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2563 mp->enable_disable);
2566 reg = vl_api_client_index_to_registration (mp->client_index);
2569 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2570 fib, mp->client_index);
2574 rmp = vl_msg_api_alloc (sizeof (*rmp));
2575 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2576 rmp->context = mp->context;
2577 rmp->retval = retval;
2579 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Subscribe/unsubscribe the requesting client to ip6 mfib counters for
 * all mfibs (mfib ~0), then reply.
 * NOTE(review): rmp is declared as vl_api_want_ip4_mfib_stats_reply_t —
 * the ip4 reply type — while the ip6 reply msg id is sent. Layouts are
 * presumably identical; confirm/fix upstream.
 */
2583 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2585 stats_main_t *sm = &stats_main;
2586 vpe_client_registration_t rp;
2587 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2590 vl_api_registration_t *reg;
2593 mfib = ~0; //Using same mechanism as _per_interface_
2594 rp.client_index = mp->client_index;
2595 rp.client_pid = mp->pid;
2597 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2598 mp->enable_disable);
2601 reg = vl_api_client_index_to_registration (mp->client_index);
2604 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2605 mfib, mp->client_index);
2609 rmp = vl_msg_api_alloc (sizeof (*rmp));
2610 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2611 rmp->context = mp->context;
2612 rmp->retval = retval;
2614 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Deliberate stubs: per the FIXME, neighbour-stats subscription is broken
 * and these handlers intentionally do nothing until the follow-up patch
 * lands (their empty bodies are elided in this view).
 */
2617 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
2619 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
2624 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/*
 * GET_SUMMARY_STATS: sum the combined RX/TX packet and byte counters
 * across all software interfaces (under the interface counter lock) and
 * reply with the totals plus the last per-node vector length, all in
 * network byte order.
 */
2629 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2631 stats_main_t *sm = &stats_main;
2632 vnet_interface_main_t *im = sm->interface_main;
2633 vl_api_vnet_get_summary_stats_reply_t *rmp;
2634 vlib_combined_counter_main_t *cm;
2637 u64 total_pkts[VLIB_N_RX_TX];
2638 u64 total_bytes[VLIB_N_RX_TX];
2639 vl_api_registration_t *reg;
2641 reg = vl_api_client_index_to_registration (mp->client_index);
2645 rmp = vl_msg_api_alloc (sizeof (*rmp));
2646 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2647 rmp->context = mp->context;
2650 memset (total_pkts, 0, sizeof (total_pkts));
2651 memset (total_bytes, 0, sizeof (total_bytes));
2653 vnet_interface_counter_lock (im);
2655 vec_foreach (cm, im->combined_sw_if_counters)
2657 which = cm - im->combined_sw_if_counters;
2659 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2661 vlib_get_combined_counter (cm, i, &v);
2662 total_pkts[which] += v.packets;
2663 total_bytes[which] += v.bytes;
2666 vnet_interface_counter_unlock (im);
2668 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2669 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2670 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2671 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
2673 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2675 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Client-disconnect callback. The per-client registration cleanup is
 * currently commented out (the registration scheme changed from a
 * per-client hash to per-stat pools), so this is effectively a no-op —
 * NOTE(review): confirm stale registrations are cleaned up elsewhere
 * (the relay handlers deregister dead queues lazily).
 */
2679 stats_memclnt_delete_callback (u32 client_index)
2681 vpe_client_stats_registration_t *rp;
2682 stats_main_t *sm = &stats_main;
2686 /* p = hash_get (sm->stats_registration_hash, client_index); */
2689 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2690 /* pool_put (sm->stats_registrations, rp); */
2691 /* hash_unset (sm->stats_registration_hash, client_index); */
/*
 * The counter messages are built with fields already converted to network
 * byte order at construction time, so the generated endian/print handlers
 * are stubbed out with vl_noop_handler before the handler table below is
 * instantiated.
 */
2697 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
2698 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
2699 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
2700 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
2701 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
2702 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
2703 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
2704 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
2705 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
2706 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
2707 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
2708 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/*
 * Plugin init: wire up stats_main (vnet/interface pointers, 10s poll
 * interval, cache-line-aligned data-structure lock), register all message
 * handlers from foreach_stats_msg, mark the counter messages as "bounce"
 * (the message infra must not free them — the relay handlers reuse or
 * free them explicitly), register message CRCs, and size/zero the
 * per-stat registration pools and hashes from stats.reg.
 */
2710 static clib_error_t *
2711 stats_init (vlib_main_t * vm)
2713 stats_main_t *sm = &stats_main;
2714 api_main_t *am = &api_main;
2715 void *vlib_worker_thread_bootstrap_fn (void *arg);
2718 sm->vnet_main = vnet_get_main ();
2719 sm->interface_main = &vnet_get_main ()->interface_main;
2721 sm->stats_poll_interval_in_seconds = 10;
2722 sm->data_structure_lock =
2723 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
2724 CLIB_CACHE_LINE_BYTES);
2725 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
2728 vl_msg_api_set_handlers(VL_API_##N, #n, \
2729 vl_api_##n##_t_handler, \
2731 vl_api_##n##_t_endian, \
2732 vl_api_##n##_t_print, \
2733 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
2737 /* tell the msg infra not to free these messages... */
2738 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
2739 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
2740 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
2741 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
2742 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
2743 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
2746 * Set up the (msg_name, crc, message-id) table
2748 setup_message_id_table (am);
2750 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
2751 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
2752 #define stats_reg(n) \
2753 sm->stats_registrations[IDX_##n] = 0; \
2754 sm->stats_registration_hash[IDX_##n] = 0;
2755 #include <vpp/stats/stats.reg>
/* Run stats_init at vlib startup, and register the dedicated stats
 * pthread (stats_thread_fn) with the thread framework; it needs no
 * cloned data structures of its own. */
2761 VLIB_INIT_FUNCTION (stats_init);
2764 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
2766 .function = stats_thread_fn,
2769 .no_data_structure_clone = 1,
2775 * fd.io coding-style-patch-verification: ON
2778 * eval: (c-set-style "gnu")