2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
24 stats_main_t stats_main;
26 #include <vnet/ip/ip.h>
28 #include <vpp/api/vpe_msg_enum.h>
31 #define f64_print(a,b)
33 #define vl_typedefs /* define message structures */
34 #include <vpp/api/vpe_all_api_h.h>
37 #define vl_endianfun /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
41 /* instantiate all the print functions we know about */
42 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
44 #include <vpp/api/vpe_all_api_h.h>
/* X-macro table mapping stats API message IDs to handler-name suffixes.
 * Each _(UPPER, lower) pair pairs VL_API_<UPPER> with a
 * vl_api_<lower>_t_handler function defined later in this file.
 * NOTE(review): the numeric prefix on every line of this listing appears to
 * be leftover line numbering from extraction, not part of the code. */
47 #define foreach_stats_msg \
48 _(WANT_STATS, want_stats) \
49 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
50 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
51 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
52 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
53 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
54 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
55 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
56 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
57 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
58 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
59 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
60 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
61 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
62 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
63 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
64 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
65 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
68 #define vl_msg_name_crc_list
69 #include <vpp/stats/stats.api.h>
70 #undef vl_msg_name_crc_list
/* Register each stats API message's "name_crc" string with the API message
 * table, so clients can resolve message IDs by name+CRC at connect time.
 * NOTE(review): the return type, the #define of the per-message macro and
 * the braces are missing from this listing — dropped during extraction. */
73 setup_message_id_table (api_main_t * am)
76 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
77 foreach_vl_msg_name_crc_stats;
81 /* These constants ensure msg sizes <= 1024, aka ring allocation */
/* Max counters packed into one shared-memory message, per counter kind.
 * Combined counters are twice the size of simple ones, hence half the batch. */
82 #define SIMPLE_COUNTER_BATCH_SIZE 126
83 #define COMBINED_COUNTER_BATCH_SIZE 63
84 #define IP4_FIB_COUNTER_BATCH_SIZE 48
85 #define IP6_FIB_COUNTER_BATCH_SIZE 30
86 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
87 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
/* Pause (5 ms, in nanoseconds) used by the shippers when the client's
 * input queue is full — see ip46_fib_stats_delay(). */
90 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* vlib format function: pretty-print a VNET_INTERFACE_COMBINED_COUNTERS
 * message as "<if-name>.<rx|tx>.packets N" / "...bytes N" lines.
 * va_args: one vl_api_vnet_interface_combined_counters_t * (network order).
 * NOTE(review): braces, the counter_name assignments for the RX/TX cases,
 * and the vp++ advance are missing from this listing. */
94 format_vnet_interface_combined_counters (u8 * s, va_list * args)
96 stats_main_t *sm = &stats_main;
97 vl_api_vnet_interface_combined_counters_t *mp =
98 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
101 u32 count, sw_if_index;
/* count / first_sw_if_index arrive big-endian from the wire */
103 count = ntohl (mp->count);
104 sw_if_index = ntohl (mp->first_sw_if_index);
108 vp = (vlib_counter_t *) mp->data;
/* Map the counter-type enum to a printable name; "bogus" on unknown. */
110 switch (mp->vnet_counter_type)
112 case VNET_INTERFACE_COUNTER_RX:
115 case VNET_INTERFACE_COUNTER_TX:
119 counter_name = "bogus";
122 for (i = 0; i < count; i++)
/* Counters in the message are unaligned and big-endian. */
124 packets = clib_mem_unaligned (&vp->packets, u64);
125 packets = clib_net_to_host_u64 (packets);
126 bytes = clib_mem_unaligned (&vp->bytes, u64);
127 bytes = clib_net_to_host_u64 (bytes);
129 s = format (s, "%U.%s.packets %lld\n",
130 format_vnet_sw_if_index_name,
131 sm->vnet_main, sw_if_index, counter_name, packets);
132 s = format (s, "%U.%s.bytes %lld\n",
133 format_vnet_sw_if_index_name,
134 sm->vnet_main, sw_if_index, counter_name, bytes);
/* vlib format function: pretty-print a VNET_INTERFACE_SIMPLE_COUNTERS
 * message as "<if-name>.<counter> N" lines, one per u64 in mp->data.
 * va_args: one vl_api_vnet_interface_simple_counters_t * (network order).
 * NOTE(review): braces, break statements and the vp++/sw_if_index++
 * advances are missing from this listing. */
141 format_vnet_interface_simple_counters (u8 * s, va_list * args)
143 stats_main_t *sm = &stats_main;
144 vl_api_vnet_interface_simple_counters_t *mp =
145 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
147 u32 count, sw_if_index;
148 count = ntohl (mp->count);
149 sw_if_index = ntohl (mp->first_sw_if_index);
151 vp = (u64 *) mp->data;
/* Human-readable name for each simple counter type. */
154 switch (mp->vnet_counter_type)
156 case VNET_INTERFACE_COUNTER_DROP:
157 counter_name = "drop";
159 case VNET_INTERFACE_COUNTER_PUNT:
160 counter_name = "punt";
162 case VNET_INTERFACE_COUNTER_IP4:
163 counter_name = "ip4";
165 case VNET_INTERFACE_COUNTER_IP6:
166 counter_name = "ip6";
168 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
169 counter_name = "rx-no-buff";
171 case VNET_INTERFACE_COUNTER_RX_MISS:
172 counter_name = "rx-miss";
174 case VNET_INTERFACE_COUNTER_RX_ERROR:
175 counter_name = "rx-error (fifo-full)";
177 case VNET_INTERFACE_COUNTER_TX_ERROR:
178 counter_name = "tx-error (fifo-full)";
181 counter_name = "bogus";
184 for (i = 0; i < count; i++)
/* Values are unaligned big-endian u64s packed in mp->data. */
186 v = clib_mem_unaligned (vp, u64);
187 v = clib_net_to_host_u64 (v);
189 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
190 sm->vnet_main, sw_if_index, counter_name, v);
/* Acquire the stats data-structure lock (recursive per thread: if this
 * thread already holds it, the missing early-return path presumably just
 * bumps a count — TODO confirm against the full source).
 * release_hint/tag are bookkeeping inputs; their uses are in lines dropped
 * from this listing. Spin-acquires via atomic test-and-set. */
198 dslock (stats_main_t * sm, int release_hint, int tag)
201 data_structure_lock_t *l = sm->data_structure_lock;
/* No lock allocated yet — nothing to take (early return dropped here). */
203 if (PREDICT_FALSE (l == 0))
206 thread_index = vlib_get_thread_index ();
/* Re-entry by the owning thread. */
207 if (l->lock && l->thread_index == thread_index)
/* Spin until we own the lock. */
216 while (__sync_lock_test_and_set (&l->lock, 1))
219 l->thread_index = thread_index;
/* Public wrapper: take the stats data-structure lock on the global
 * stats_main, passing through the caller's hint and tag. */
224 stats_dslock_with_hint (int hint, int tag)
226 stats_main_t *sm = &stats_main;
227 dslock (sm, hint, tag);
/* Release the stats data-structure lock. Asserts the calling thread is the
 * current owner. The actual lock-clear line is missing from this listing;
 * the barrier orders prior writes before the release. */
231 dsunlock (stats_main_t * sm)
234 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock never allocated — nothing to release. */
236 if (PREDICT_FALSE (l == 0))
239 thread_index = vlib_get_thread_index ();
240 ASSERT (l->lock && l->thread_index == thread_index);
246 CLIB_MEMORY_BARRIER ();
/* Public wrapper: release the stats data-structure lock on the global
 * stats_main (the dsunlock() call itself is on a line dropped from this
 * listing). hint/tag parameters are accepted for API symmetry. */
252 stats_dsunlock (int hint, int tag)
254 stats_main_t *sm = &stats_main;
/* Look up one client's registration for (reg, item).
 * reg: stats registration class index (IDX_*); item: e.g. sw_if_index or
 * fib index; client_index: API client to find.
 * Returns the client's vpe_client_registration_t, or NULL (on the dropped
 * early-return lines) when nothing is registered for the item or the
 * client is not among its listeners. */
258 static vpe_client_registration_t *
259 get_client_for_stat (u32 reg, u32 item, u32 client_index)
261 stats_main_t *sm = &stats_main;
262 vpe_client_stats_registration_t *registration;
265 /* Is there anything listening for item in that reg */
266 p = hash_get (sm->stats_registration_hash[reg], item);
271 /* If there is, is our client_index one of them */
272 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
273 p = hash_get (registration->client_hash, client_index);
278 return pool_elt_at_index (registration->clients, p[0]);
/* Register `client` as a listener for (reg, item).
 * Creates the per-item registration (pool entry + hash mapping) if this is
 * the first listener for the item, then adds the client to that
 * registration's pool/hash if not already present.
 * Returns 1: at least one client now listens, so the poller should run. */
283 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
285 stats_main_t *sm = &stats_main;
286 vpe_client_stats_registration_t *registration;
287 vpe_client_registration_t *cr;
290 /* Is there anything listening for item in that reg */
291 p = hash_get (sm->stats_registration_hash[reg], item);
/* First listener for this item: create the registration record. */
295 pool_get (sm->stats_registrations[reg], registration);
296 registration->item = item;
297 hash_set (sm->stats_registration_hash[reg], item,
298 registration - sm->stats_registrations[reg]);
/* Item already registered: reuse the existing record. */
302 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* Add this client unless it is already in the registration. */
305 p = hash_get (registration->client_hash, client->client_index);
309 pool_get (registration->clients, cr);
310 cr->client_index = client->client_index;
311 cr->client_pid = client->client_pid;
312 hash_set (registration->client_hash, cr->client_index,
313 cr - registration->clients);
316 return 1; //At least one client is doing something ... poll
/* Remove client_index's registration for (reg, item); if that leaves the
 * item with no listeners, drop the item's registration too. Finally counts
 * the remaining registrations across all classes — the dropped return line
 * presumably reports whether any listener remains (drives enable_poller).
 * TODO confirm return semantics against the full source. */
320 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
322 stats_main_t *sm = &stats_main;
323 vpe_client_stats_registration_t *registration;
324 vpe_client_registration_t *client;
328 /* Clear the client first */
329 /* Is there anything listening for item in that reg */
330 p = hash_get (sm->stats_registration_hash[reg], item);
335 /* If there is, is our client_index one of them */
336 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
337 p = hash_get (registration->client_hash, client_index);
/* Remove the client from the registration's hash and pool. */
342 client = pool_elt_at_index (registration->clients, p[0]);
343 hash_unset (registration->client_hash, client->client_index);
344 pool_put (registration->clients, client);
346 /* Now check if that was the last client for that item */
347 if (0 == pool_elts (registration->clients))
349 hash_unset (sm->stats_registration_hash[reg], item);
350 pool_put (sm->stats_registrations[reg], registration);
355 /* Now check if that was the last item in any of the listened to stats */
356 for (i = 0; i < STATS_REG_N_IDX; i++)
358 elts += pool_elts (sm->stats_registrations[i]);
/* Return a (cached, reused) vector holding copies of every client
 * registered for (reg, item), or NULL/empty when none. Callers must not
 * free the vector — its length is reset on each call (vec_reset_length),
 * suggesting it is an sm-owned scratch vector; TODO confirm ownership
 * against the full source, since `clients` is declared locally here. */
363 vpe_client_registration_t *
364 get_clients_for_stat (u32 reg, u32 item)
366 stats_main_t *sm = &stats_main;
367 vpe_client_registration_t *client, *clients = 0;
368 vpe_client_stats_registration_t *registration;
371 /* Is there anything listening for item in that reg */
372 p = hash_get (sm->stats_registration_hash[reg], item);
377 /* If there is, is our client_index one of them */
378 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
/* Copy each registered client out of the pool into the flat vector. */
380 vec_reset_length (clients);
381 pool_foreach (client, registration->clients, (
383 vec_add1 (clients, *client);}
/* Free every per-stat index vector in a client's registrations array
 * (one vec_free per stats_reg entry generated from stats.reg), then free
 * the array itself. */
390 clear_client_reg (u32 ** registrations)
392 /* When registrations[x] is a vector of pool indices
393 here is a good place to clean up the pools
395 #define stats_reg(n) vec_free(registrations[IDX_##n]);
396 #include <vpp/stats/stats.reg>
399 vec_free (registrations);
/* Initialise a client's registrations array: validate it to hold one slot
 * per stats class (STATS_REG_N_IDX) and reset each class's index vector.
 * Returns the (possibly reallocated by vec_validate) array. */
403 init_client_reg (u32 ** registrations)
407 Initialise the stats registrations for each
408 type of stat a client can register for as well as
409 a vector of "interested" indexes.
410 Initially this is a u32 of either sw_if_index or fib_index
411 but eventually this should migrate to a pool_index (u32)
412 with a type specific pool that can include more complex things
413 such as timing and structured events.
415 vec_validate (registrations, STATS_REG_N_IDX);
416 #define stats_reg(n) \
417 vec_reset_length(registrations[IDX_##n]);
418 #include <vpp/stats/stats.reg>
422 When registrations[x] is a vector of pool indices, here
423 is a good place to init the pools.
425 return registrations;
/* Enable every stat class for a client by appending the ~0 "all items"
 * wildcard to each class's index vector. Returns the array (vec_add1 may
 * reallocate the per-class vectors, not the outer array). */
429 enable_all_client_reg (u32 ** registrations)
433 Enable all stats known by adding
434 ~0 to the index vector. Eventually this
435 should be deprecated.
437 #define stats_reg(n) \
438 vec_add1(registrations[IDX_##n], ~0);
439 #include <vpp/stats/stats.reg>
441 return registrations;
/* Collect all simple (single-u64) per-interface counters and ship them to
 * the main thread's shared-memory input queue in batches of at most
 * SIMPLE_COUNTER_BATCH_SIZE, as VNET_INTERFACE_SIMPLE_COUNTERS messages.
 * Holds the interface counter lock across the whole scan.
 * NOTE(review): braces, the mp->count accumulation and the vp advance are
 * on lines dropped from this listing. */
445 do_simple_interface_counters (stats_main_t * sm)
447 vl_api_vnet_interface_simple_counters_t *mp = 0;
448 vnet_interface_main_t *im = sm->interface_main;
449 api_main_t *am = sm->api_main;
450 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
451 svm_queue_t *q = shmem_hdr->vl_input_queue;
452 vlib_simple_counter_main_t *cm;
453 u32 items_this_message = 0;
458 * Prevent interface registration from expanding / moving the vectors...
459 * That tends never to happen, so we can hold this lock for a while.
461 vnet_interface_counter_lock (im);
/* One pass per simple-counter type (drop, punt, ip4, ...). */
463 vec_foreach (cm, im->sw_if_counters)
465 n_counts = vlib_simple_counter_n_counters (cm);
466 for (i = 0; i < n_counts; i++)
/* Start a fresh message when none is in flight. */
470 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
473 mp = vl_msg_api_alloc_as_if_client
474 (sizeof (*mp) + items_this_message * sizeof (v));
475 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
/* counter type = offset of cm within the sw_if_counters vector */
476 mp->vnet_counter_type = cm - im->sw_if_counters;
477 mp->first_sw_if_index = htonl (i);
479 vp = (u64 *) mp->data;
/* Append this interface's value, big-endian and unaligned. */
481 v = vlib_get_simple_counter (cm, i);
482 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
/* Batch full: finalize the count and enqueue to the main thread. */
485 if (mp->count == items_this_message)
487 mp->count = htonl (items_this_message);
488 /* Send to the main thread... */
489 vl_msg_api_send_shmem (q, (u8 *) & mp);
495 vnet_interface_counter_unlock (im);
/* Enable or disable a client's registration for (stat, item).
 * enable_disable == 0: remove the registration (warn if the client wasn't
 * registered); otherwise add it and update sm->enable_poller so the stats
 * poller runs while anyone is listening. */
499 handle_client_registration (vpe_client_registration_t * client, u32 stat,
500 u32 item, int enable_disable)
502 stats_main_t *sm = &stats_main;
503 vpe_client_registration_t *rp, _rp;
505 rp = get_client_for_stat (stat, item, client->client_index);
508 if (enable_disable == 0)
510 if (!rp) // No client to disable
512 clib_warning ("pid %d: already disabled for stats...",
517 clear_client_for_stat (stat, item, client->client_index);
/* Enable path: fill a registration record and install it. */
524 rp->client_index = client->client_index;
525 rp->client_pid = client->client_pid;
526 sm->enable_poller = set_client_for_stat (stat, item, rp);
531 /**********************************
532 * ALL Interface Combined stats - to be deprecated
533 **********************************/
536 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/* Handler for WANT_INTERFACE_COMBINED_STATS: legacy "all interfaces"
 * request, implemented by registering the client for the per-interface
 * mechanism with the ~0 wildcard sw_if_index. If the client can no longer
 * be reached, its registration is rolled back. Replies with
 * WANT_INTERFACE_COMBINED_STATS_REPLY. */
539 vl_api_want_interface_combined_stats_t_handler
540 (vl_api_want_interface_combined_stats_t * mp)
542 stats_main_t *sm = &stats_main;
543 vpe_client_registration_t rp;
544 vl_api_want_interface_combined_stats_reply_t *rmp;
547 vl_api_registration_t *reg;
550 swif = ~0; //Using same mechanism as _per_interface_
551 rp.client_index = mp->client_index;
552 rp.client_pid = mp->pid;
554 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
/* Client may have disconnected abruptly: undo the registration. */
558 reg = vl_api_client_index_to_registration (mp->client_index);
562 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
567 rmp = vl_msg_api_alloc (sizeof (*rmp));
568 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
569 rmp->context = mp->context;
570 rmp->retval = retval;
572 vl_api_send_msg (reg, (u8 *) rmp);
/* Handler run on the main thread when the stats thread enqueues a
 * VNET_INTERFACE_COMBINED_COUNTERS message: fan it out to every client
 * registered for the ~0 wildcard. The message is copied for all clients
 * but the last reachable one, which receives the original (reg_prev
 * defers the send so the final recipient avoids a copy). Frees mp if no
 * client could take it. */
576 vl_api_vnet_interface_combined_counters_t_handler
577 (vl_api_vnet_interface_combined_counters_t * mp)
579 vpe_client_registration_t *clients, client;
580 stats_main_t *sm = &stats_main;
581 vl_api_registration_t *reg, *reg_prev = NULL;
582 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
/* Total wire size: header + count combined counters. */
586 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
589 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
590 ~0 /*flag for all */ );
592 for (i = 0; i < vec_len (clients); i++)
595 reg = vl_api_client_index_to_registration (client.client_index);
/* Send the previous copy before re-using mp for the next client. */
598 if (reg_prev && vl_api_can_send_msg (reg_prev))
600 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
601 clib_memcpy (mp_copy, mp, mp_size);
602 vl_api_send_msg (reg_prev, (u8 *) mp);
/* Debug path: dump the counters to stdout (guard dropped in listing). */
609 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
/* Last reachable client gets the original message (no copy). */
612 if (reg_prev && vl_api_can_send_msg (reg_prev))
614 vl_api_send_msg (reg_prev, (u8 *) mp);
/* Nobody took ownership: free the message. */
618 vl_msg_api_free (mp);
/* Collect all combined (packets+bytes) per-interface counters and ship
 * them to the main thread in batches of at most
 * COMBINED_COUNTER_BATCH_SIZE, as VNET_INTERFACE_COMBINED_COUNTERS
 * messages. Mirrors do_simple_interface_counters() for vlib_counter_t.
 * NOTE(review): braces, mp->count accumulation and the vp advance are on
 * lines dropped from this listing. */
623 do_combined_interface_counters (stats_main_t * sm)
625 vl_api_vnet_interface_combined_counters_t *mp = 0;
626 vnet_interface_main_t *im = sm->interface_main;
627 api_main_t *am = sm->api_main;
628 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
629 svm_queue_t *q = shmem_hdr->vl_input_queue;
630 vlib_combined_counter_main_t *cm;
631 u32 items_this_message = 0;
632 vlib_counter_t v, *vp = 0;
/* Freeze interface vectors while we scan. */
635 vnet_interface_counter_lock (im);
637 vec_foreach (cm, im->combined_sw_if_counters)
639 n_counts = vlib_combined_counter_n_counters (cm);
640 for (i = 0; i < n_counts; i++)
/* Start a fresh message when none is in flight. */
644 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
647 mp = vl_msg_api_alloc_as_if_client
648 (sizeof (*mp) + items_this_message * sizeof (v));
649 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
650 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
651 mp->first_sw_if_index = htonl (i);
653 vp = (vlib_counter_t *) mp->data;
/* Append packets/bytes, big-endian and unaligned. */
655 vlib_get_combined_counter (cm, i, &v);
656 clib_mem_unaligned (&vp->packets, u64)
657 = clib_host_to_net_u64 (v.packets);
658 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
/* Batch full: finalize the count and enqueue to the main thread. */
661 if (mp->count == items_this_message)
663 mp->count = htonl (items_this_message);
664 /* Send to the main thread... */
665 vl_msg_api_send_shmem (q, (u8 *) & mp);
671 vnet_interface_counter_unlock (im);
674 /**********************************
675 * Per Interface Combined stats
676 **********************************/
678 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_COMBINED_STATS: validate every requested
 * sw_if_index, then (en/dis)able the client's registration for each. If
 * the client has vanished, roll the registrations back. Replies with
 * WANT_PER_INTERFACE_COMBINED_STATS_REPLY carrying retval. */
680 vl_api_want_per_interface_combined_stats_t_handler
681 (vl_api_want_per_interface_combined_stats_t * mp)
683 stats_main_t *sm = &stats_main;
684 vpe_client_registration_t rp;
685 vl_api_want_per_interface_combined_stats_reply_t *rmp;
686 vlib_combined_counter_main_t *cm;
689 vl_api_registration_t *reg;
690 u32 i, swif, num = 0;
692 num = ntohl (mp->num);
695 * Validate sw_if_indexes before registering
697 for (i = 0; i < num; i++)
699 swif = ntohl (mp->sw_ifs[i]);
702 * Check its a real sw_if_index that the client is allowed to see
706 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
708 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indices valid: register (or clear) each one for this client. */
714 for (i = 0; i < num; i++)
716 swif = ntohl (mp->sw_ifs[i]);
718 rp.client_index = mp->client_index;
719 rp.client_pid = mp->pid;
720 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
721 swif, ntohl (mp->enable_disable));
/* Client may have disconnected abruptly: undo all registrations. */
725 reg = vl_api_client_index_to_registration (mp->client_index);
728 for (i = 0; i < num; i++)
730 swif = ntohl (mp->sw_ifs[i]);
733 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
739 rmp = vl_msg_api_alloc (sizeof (*rmp));
740 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
741 rmp->context = mp->context;
742 rmp->retval = retval;
744 vl_api_send_msg (reg, (u8 *) rmp);
747 /* Per Interface Combined distribution to client */
/* For every per-interface combined-counter registration, build and send a
 * VNET_PER_INTERFACE_COMBINED_COUNTERS message (one entry per message for
 * now) to each registered client. Registrations and clients are snapshotted
 * into sm->regs_tmp / sm->clients_tmp so the pools can be mutated safely
 * while we iterate. A ~0 wildcard registration is delegated to
 * do_combined_interface_counters(). Dead clients are pruned. */
749 do_combined_per_interface_counters (stats_main_t * sm)
751 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
752 vnet_interface_main_t *im = sm->interface_main;
753 api_main_t *am = sm->api_main;
754 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
755 vl_api_registration_t *vl_reg;
756 vlib_combined_counter_main_t *cm;
757 vl_api_vnet_combined_counter_t *vp = 0;
760 vpe_client_stats_registration_t *reg;
761 vpe_client_registration_t *client;
762 u32 *sw_if_index = 0;
764 vnet_interface_counter_lock (im);
/* Snapshot the registrations so pool mutation during send is safe. */
766 vec_reset_length (sm->regs_tmp);
770 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
771 ({ vec_add1 (sm->regs_tmp, reg); }));
774 for (i = 0; i < vec_len (sm->regs_tmp); i++)
776 reg = sm->regs_tmp[i];
/* ~0 wildcard: fall back to the all-interfaces path (must drop the
 * counter lock around it — dropped guard line in this listing). */
779 vnet_interface_counter_unlock (im);
780 do_combined_interface_counters (sm);
781 vnet_interface_counter_lock (im);
/* Snapshot this registration's clients as well. */
784 vec_reset_length (sm->clients_tmp);
787 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
791 for (j = 0; j < vec_len (sm->clients_tmp); j++)
793 client = sm->clients_tmp[j];
795 vl_reg = vl_api_client_index_to_registration (client->client_index);
797 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
801 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
802 reg->item, client->client_index);
/* One counter entry per message, for now. */
805 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
806 memset (mp, 0, sizeof (*mp));
809 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
812 * count will eventually be used to optimise the batching
813 * of per client messages for each stat. For now setting this to 1 then
814 * iterate. This will not affect API.
816 * FIXME instead of enqueueing here, this should be sent to a batch
817 * storer for per-client transmission. Each "mp" sent would be a single entry
818 * and if a client is listening to other sw_if_indexes for same, it would be
819 * appended to that *mp
823 * - capturing the timestamp of the counters "when VPP knew them" is important.
824 * Less so is that the timing of the delivery to the control plane be in the same
827 * i.e. As long as the control plane can delta messages from VPP and work out
828 * velocity etc based on the timestamp, it can do so in a more "batch mode".
830 * It would be beneficial to keep a "per-client" message queue, and then
831 * batch all the stat messages for a client into one message, with
832 * discrete timestamps.
834 * Given this particular API is for "per interface" one assumes that the scale
835 * is less than the ~0 case, which the prior API is suited for.
839 * 1 message per api call for now
841 mp->count = htonl (1);
842 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
844 vp = (vl_api_vnet_combined_counter_t *) mp->data;
845 vp->sw_if_index = htonl (reg->item);
/* Read RX then TX combined counters for this interface, big-endian. */
847 im = &vnet_get_main ()->interface_main;
848 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
849 vlib_get_combined_counter (cm, reg->item, &v);
850 clib_mem_unaligned (&vp->rx_packets, u64) =
851 clib_host_to_net_u64 (v.packets);
852 clib_mem_unaligned (&vp->rx_bytes, u64) =
853 clib_host_to_net_u64 (v.bytes);
854 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
855 vlib_get_combined_counter (cm, reg->item, &v);
856 clib_mem_unaligned (&vp->tx_packets, u64) =
857 clib_host_to_net_u64 (v.packets);
858 clib_mem_unaligned (&vp->tx_bytes, u64) =
859 clib_host_to_net_u64 (v.bytes);
861 vl_api_send_msg (vl_reg, (u8 *) mp);
865 vnet_interface_counter_unlock (im);
868 /**********************************
869 * Per Interface simple stats
870 **********************************/
872 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_SIMPLE_STATS: validate every requested
 * sw_if_index, then (en/dis)able the client's registration for each.
 * Rolls back on abrupt client disconnect; replies with
 * WANT_PER_INTERFACE_SIMPLE_STATS_REPLY carrying retval.
 * Mirrors the combined-stats handler above. */
874 vl_api_want_per_interface_simple_stats_t_handler
875 (vl_api_want_per_interface_simple_stats_t * mp)
877 stats_main_t *sm = &stats_main;
878 vpe_client_registration_t rp;
879 vl_api_want_per_interface_simple_stats_reply_t *rmp;
880 vlib_simple_counter_main_t *cm;
883 vl_api_registration_t *reg;
884 u32 i, swif, num = 0;
886 num = ntohl (mp->num);
888 for (i = 0; i < num; i++)
890 swif = ntohl (mp->sw_ifs[i]);
892 /* Check its a real sw_if_index that the client is allowed to see */
895 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
897 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
/* All indices valid: register (or clear) each one for this client. */
903 for (i = 0; i < num; i++)
905 swif = ntohl (mp->sw_ifs[i]);
907 rp.client_index = mp->client_index;
908 rp.client_pid = mp->pid;
909 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
910 swif, ntohl (mp->enable_disable));
914 reg = vl_api_client_index_to_registration (mp->client_index);
916 /* Client may have disconnected abruptly, clean up */
919 for (i = 0; i < num; i++)
921 swif = ntohl (mp->sw_ifs[i]);
923 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
931 rmp = vl_msg_api_alloc (sizeof (*rmp));
932 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
933 rmp->context = mp->context;
934 rmp->retval = retval;
936 vl_api_send_msg (reg, (u8 *) rmp);
939 /* Per Interface Simple distribution to client */
/* For every per-interface simple-counter registration, build and send a
 * VNET_PER_INTERFACE_SIMPLE_COUNTERS message (one entry per message for
 * now) containing drop/punt/ip4/ip6/rx-no-buf/rx-miss/rx-error/tx-error
 * and MPLS counters for the registered interface. Uses the same
 * snapshot-then-iterate pattern as the combined variant; ~0 wildcard
 * delegates to do_simple_interface_counters(); dead clients are pruned. */
941 do_simple_per_interface_counters (stats_main_t * sm)
943 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
944 vnet_interface_main_t *im = sm->interface_main;
945 api_main_t *am = sm->api_main;
946 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
947 vl_api_registration_t *vl_reg;
948 vlib_simple_counter_main_t *cm;
950 vpe_client_stats_registration_t *reg;
951 vpe_client_registration_t *client;
952 u32 timestamp, count;
953 vl_api_vnet_simple_counter_t *vp = 0;
956 vnet_interface_counter_lock (im);
/* Snapshot registrations so pool mutation during send is safe. */
958 vec_reset_length (sm->regs_tmp);
962 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
963 ({ vec_add1 (sm->regs_tmp, reg); }));
966 for (i = 0; i < vec_len (sm->regs_tmp); i++)
968 reg = sm->regs_tmp[i];
/* ~0 wildcard: all-interfaces path, with the lock dropped around it. */
971 vnet_interface_counter_unlock (im);
972 do_simple_interface_counters (sm);
973 vnet_interface_counter_lock (im);
/* Snapshot this registration's clients. */
976 vec_reset_length (sm->clients_tmp);
979 pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
983 for (j = 0; j < vec_len (sm->clients_tmp); j++)
985 client = sm->clients_tmp[j];
986 vl_reg = vl_api_client_index_to_registration (client->client_index);
988 /* Client may have disconnected abrubtly, clean up */
992 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
993 reg->item, client->client_index);
/* One counter entry per message, for now. */
997 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
998 memset (mp, 0, sizeof (*mp));
999 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1002 * count will eventually be used to optimise the batching
1003 * of per client messages for each stat. For now setting this to 1 then
1004 * iterate. This will not affect API.
1006 * FIXME instead of enqueueing here, this should be sent to a batch
1007 * storer for per-client transmission. Each "mp" sent would be a single entry
1008 * and if a client is listening to other sw_if_indexes for same, it would be
1009 * appended to that *mp
1013 * - capturing the timestamp of the counters "when VPP knew them" is important.
1014 * Less so is that the timing of the delivery to the control plane be in the same
1017 * i.e. As long as the control plane can delta messages from VPP and work out
1018 * velocity etc based on the timestamp, it can do so in a more "batch mode".
1020 * It would be beneficial to keep a "per-client" message queue, and then
1021 * batch all the stat messages for a client into one message, with
1022 * discrete timestamps.
1024 * Given this particular API is for "per interface" one assumes that the scale
1025 * is less than the ~0 case, which the prior API is suited for.
1029 * 1 message per api call for now
1031 mp->count = htonl (1);
1032 mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1033 vp = (vl_api_vnet_simple_counter_t *) mp->data;
1035 vp->sw_if_index = htonl (reg->item);
/* Fill each simple counter, big-endian and unaligned. */
1037 // VNET_INTERFACE_COUNTER_DROP
1038 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1039 v = vlib_get_simple_counter (cm, reg->item);
1040 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1042 // VNET_INTERFACE_COUNTER_PUNT
1043 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1044 v = vlib_get_simple_counter (cm, reg->item);
1045 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1047 // VNET_INTERFACE_COUNTER_IP4
1048 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1049 v = vlib_get_simple_counter (cm, reg->item);
1050 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1052 //VNET_INTERFACE_COUNTER_IP6
1053 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1054 v = vlib_get_simple_counter (cm, reg->item);
1055 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1057 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1058 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1059 v = vlib_get_simple_counter (cm, reg->item);
1060 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1061 clib_host_to_net_u64 (v);
1063 //VNET_INTERFACE_COUNTER_RX_MISS
1064 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1065 v = vlib_get_simple_counter (cm, reg->item);
1066 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1068 //VNET_INTERFACE_COUNTER_RX_ERROR
1069 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1070 v = vlib_get_simple_counter (cm, reg->item);
1071 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1073 //VNET_INTERFACE_COUNTER_TX_ERROR
1074 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1075 v = vlib_get_simple_counter (cm, reg->item);
1076 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1078 //VNET_INTERFACE_COUNTER_MPLS
1079 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1080 v = vlib_get_simple_counter (cm, reg->item);
1081 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1083 vl_api_send_msg (vl_reg, (u8 *) mp);
1087 vnet_interface_counter_unlock (im);
1090 /**********************************
1092 **********************************/
/* Sleep for sec seconds + nsec nanoseconds via nanosleep(2); used to back
 * off when a client's shared-memory queue is full. On success returns
 * (return lines dropped in this listing); on failure logs via
 * clib_unix_warning. An EINTR-resume loop is presumably in the dropped
 * lines — TODO confirm. */
1095 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1097 struct timespec _req, *req = &_req;
1098 struct timespec _rem, *rem = &_rem;
1101 req->tv_nsec = nsec;
1104 if (nanosleep (req, rem) == 0)
1109 clib_unix_warning ("nanosleep");
1115 * @brief The context passed when collecting adjacency counters
1117 typedef struct ip4_nbr_stats_ctx_t_
1120 * The SW IF index all these adjs belong to
1125 * A vector of ip4 nbr counters
/* Accumulated per-neighbour counters (vec), drained by ip4_nbr_ship(). */
1127 vl_api_ip4_nbr_counter_t *counters;
1128 } ip4_nbr_stats_ctx_t;
/* adj_nbr_walk callback: read adjacency ai's combined counter and, if it
 * has seen any packets, append a big-endian entry (packets, bytes, ip4
 * next-hop, link type) to the walk context's counters vector.
 * arg is the ip4_nbr_stats_ctx_t * (cast on a line dropped from this
 * listing). Always continues the walk. */
1130 static adj_walk_rc_t
1131 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1133 vl_api_ip4_nbr_counter_t *vl_counter;
1134 vlib_counter_t adj_counter;
1135 ip4_nbr_stats_ctx_t *ctx;
1136 ip_adjacency_t *adj;
1139 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies to keep messages small. */
1141 if (0 != adj_counter.packets)
1143 vec_add2 (ctx->counters, vl_counter, 1);
1146 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1147 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1148 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1149 vl_counter->link_type = adj->ia_link;
1151 return (ADJ_WALK_RC_CONTINUE);
/* Classic two-argument minimum; NOTE(review): redefined again before
 * ip6_nbr_ship below — could be hoisted to a single shared definition. */
1154 #define MIN(x,y) (((x)<(y))?(x):(y))
/* Drain ctx->counters to the main thread as VNET_IP4_NBR_COUNTERS
 * messages, at most IP4_FIB_COUNTER_BATCH_SIZE entries each. Entries are
 * copied from the BACK of the vector and erased by shrinking its length,
 * so order of delivery is unspecified. Holds the ds lock per batch and,
 * when the queue is full, sleeps STATS_RELEASE_DELAY_NS so the client can
 * drain (suspend/unlock lines dropped from this listing). */
1157 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1159 api_main_t *am = sm->api_main;
1160 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1161 svm_queue_t *q = shmem_hdr->vl_input_queue;
1162 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1166 * If the walk context has counters, which may be left over from the last
1167 * suspend, then we continue from there.
1169 while (0 != vec_len (ctx->counters))
1171 u32 n_items = MIN (vec_len (ctx->counters),
1172 IP4_FIB_COUNTER_BATCH_SIZE);
1175 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1177 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1180 (vl_api_ip4_nbr_counter_t)));
1181 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1182 mp->count = ntohl (n_items);
1183 mp->sw_if_index = ntohl (ctx->sw_if_index);
1188 * copy the counters from the back of the context, then we can easily
1189 * 'erase' them by resetting the vector length.
1190 * The order we push the stats to the caller is not important.
1193 &ctx->counters[vec_len (ctx->counters) - n_items],
1194 n_items * sizeof (*ctx->counters));
/* Erase the shipped tail by shrinking the vector in place. */
1196 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Remember queue pressure before the (nolock) enqueue. */
1202 pause = svm_queue_is_full (q);
1204 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1205 svm_queue_unlock (q);
/* Queue was full: back off briefly so the client can drain. */
1209 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1210 STATS_RELEASE_DELAY_NS);
/* Walk every sw interface, collect its ip4 neighbour-adjacency counters
 * into a ctx via ip4_nbr_stats_cb (under the ds lock, so the walk is
 * synchronous w.r.t. routing updates), then ship any non-empty result to
 * registered clients via ip4_nbr_ship(). */
1215 do_ip4_nbr_counters (stats_main_t * sm)
1217 vnet_main_t *vnm = vnet_get_main ();
1218 vnet_interface_main_t *im = &vnm->interface_main;
1219 vnet_sw_interface_t *si;
1221 ip4_nbr_stats_ctx_t ctx = {
1227 pool_foreach (si, im->sw_interfaces,
1230 * update the interface we are now concerned with
1232 ctx.sw_if_index = si->sw_if_index;
1235 * we are about to walk another interface, so we shouldn't have any pending
1238 ASSERT(ctx.counters == NULL);
1241 * visit each neighbour adjacency on the interface and collect
1242 * its current stats.
1243 * Because we hold the lock the walk is synchronous, so safe to routing
1244 * updates. It's limited in work by the number of adjacenies on an
1245 * interface, which is typically not huge.
1247 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1248 adj_nbr_walk (si->sw_if_index,
1255 * if this interface has some adjacencies with counters then ship them,
1256 * else continue to the next interface.
1258 if (NULL != ctx.counters)
1260 ip4_nbr_ship(sm, &ctx);
1267 * @brief The context passed when collecting adjacency counters
1269 typedef struct ip6_nbr_stats_ctx_t_
1272 * The SW IF index all these adjs belong to
1277 * A vector of ip6 nbr counters
/* Accumulated per-neighbour counters (vec), drained by ip6_nbr_ship(). */
1279 vl_api_ip6_nbr_counter_t *counters;
1280 } ip6_nbr_stats_ctx_t;
/* adj_nbr_walk callback, ip6 flavour of ip4_nbr_stats_cb: append a
 * big-endian entry (packets, bytes, 128-bit next-hop as two u64s, link
 * type) for each adjacency that has seen traffic. Always continues. */
1282 static adj_walk_rc_t
1283 ip6_nbr_stats_cb (adj_index_t ai,
1286 vl_api_ip6_nbr_counter_t *vl_counter;
1287 vlib_counter_t adj_counter;
1288 ip6_nbr_stats_ctx_t *ctx;
1289 ip_adjacency_t *adj;
1292 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies to keep messages small. */
1294 if (0 != adj_counter.packets)
1296 vec_add2(ctx->counters, vl_counter, 1);
1299 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1300 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1301 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1302 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1303 vl_counter->link_type = adj->ia_link;
1305 return (ADJ_WALK_RC_CONTINUE);
/* NOTE(review): duplicate of the MIN defined before ip4_nbr_ship. */
1308 #define MIN(x,y) (((x)<(y))?(x):(y))
/* ip6 flavour of ip4_nbr_ship: drain ctx->counters to the main thread as
 * VNET_IP6_NBR_COUNTERS messages, at most IP6_FIB_COUNTER_BATCH_SIZE
 * entries each, copying from the back of the vector and backing off
 * STATS_RELEASE_DELAY_NS when the queue is full. */
1311 ip6_nbr_ship (stats_main_t * sm,
1312 ip6_nbr_stats_ctx_t *ctx)
1314 api_main_t *am = sm->api_main;
1315 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1316 svm_queue_t *q = shmem_hdr->vl_input_queue;
1317 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1321 * If the walk context has counters, which may be left over from the last
1322 * suspend, then we continue from there.
1324 while (0 != vec_len(ctx->counters))
1326 u32 n_items = MIN (vec_len (ctx->counters),
1327 IP6_FIB_COUNTER_BATCH_SIZE);
1330 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1332 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1335 (vl_api_ip6_nbr_counter_t)));
1336 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1337 mp->count = ntohl (n_items);
1338 mp->sw_if_index = ntohl (ctx->sw_if_index);
1343 * copy the counters from the back of the context, then we can easily
1344 * 'erase' them by resetting the vector length.
1345 * The order we push the stats to the caller is not important.
1348 &ctx->counters[vec_len (ctx->counters) - n_items],
1349 n_items * sizeof (*ctx->counters));
/* Erase the shipped tail by shrinking the vector in place. */
1351 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Remember queue pressure before the (nolock) enqueue. */
1357 pause = svm_queue_is_full (q);
1359 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1360 svm_queue_unlock (q);
/* Queue was full: back off briefly so the client can drain. */
1364 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1365 STATS_RELEASE_DELAY_NS);
/*
 * Collect and ship ip6 neighbour-adjacency counters: for every SW
 * interface, walk its neighbour adjacencies under the data-structure lock
 * (so the walk is atomic w.r.t. routing updates), then ship whatever was
 * gathered before moving to the next interface.
 */
1370 do_ip6_nbr_counters (stats_main_t * sm)
1372 vnet_main_t *vnm = vnet_get_main ();
1373 vnet_interface_main_t *im = &vnm->interface_main;
1374 vnet_sw_interface_t *si;
1376 ip6_nbr_stats_ctx_t ctx = {
1382 pool_foreach (si, im->sw_interfaces,
1385 * update the interface we are now concerned with
1387 ctx.sw_if_index = si->sw_if_index;
1390 * we are about to walk another interface, so we shouldn't have any pending
1393 ASSERT(ctx.counters == NULL);
1396 * visit each neighbour adjacency on the interface and collect
1397 * its current stats.
1398 * Because we hold the lock the walk is synchronous, so safe to routing
1399 * updates. It's limited in work by the number of adjacenies on an
1400 * interface, which is typically not huge.
1402 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1403 adj_nbr_walk (si->sw_if_index,
1410 * if this interface has some adjacencies with counters then ship them,
1411 * else continue to the next interface.
1413 if (NULL != ctx.counters)
1415 ip6_nbr_ship(sm, &ctx);
/*
 * Collect and ship per-route IP4 FIB counters.  For each FIB: snapshot
 * its routes under the data-structure lock, then look up each route's
 * load-balance counter and batch entries with non-zero packet counts into
 * VL_API_VNET_IP4_FIB_COUNTERS messages.  Whenever the main thread sets
 * the lock's release_hint, the scan pauses and later restarts from the
 * interrupted FIB (start_at_fib_index).
 */
1422 do_ip4_fib_counters (stats_main_t * sm)
1424 ip4_main_t *im4 = &ip4_main;
1425 api_main_t *am = sm->api_main;
1426 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1427 svm_queue_t *q = shmem_hdr->vl_input_queue;
1431 do_ip46_fibs_t *do_fibs;
1432 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1433 u32 items_this_message;
1434 vl_api_ip4_fib_counter_t *ctrp = 0;
1435 u32 start_at_fib_index = 0;
1438 do_fibs = &sm->do_ip46_fibs;
1441 vec_reset_length (do_fibs->fibs);
1443 pool_foreach (fib, im4->fibs,
1444 ({vec_add1(do_fibs->fibs,fib);}));
1448 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1450 fib = do_fibs->fibs[j];
1451 /* We may have bailed out due to control-plane activity */
1452 while ((fib - im4->fibs) < start_at_fib_index)
1455 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1459 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1460 mp = vl_msg_api_alloc_as_if_client
1462 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1463 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1465 mp->vrf_id = ntohl (fib->ft_table_id);
1466 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1470 /* happens if the last FIB was empty... */
1471 ASSERT (mp->count == 0);
1472 mp->vrf_id = ntohl (fib->ft_table_id);
1475 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1477 vec_reset_length (do_fibs->ip4routes);
1478 vec_reset_length (do_fibs->results);
/* snapshot the routes: iterate the per-prefix-length dst-address hashes */
1480 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1482 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1486 vec_reset_length (do_fibs->pvec);
1488 x.address_length = i;
1490 hash_foreach_pair (p, hash, (
1492 vec_add1 (do_fibs->pvec, p);}
1494 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1496 p = do_fibs->pvec[k];
1497 x.address.data_u32 = p->key;
1498 x.index = p->value[0];
1500 vec_add1 (do_fibs->ip4routes, x);
/* control plane wants the lock: remember where we were, pause, rescan */
1501 if (sm->data_structure_lock->release_hint)
1503 start_at_fib_index = fib - im4->fibs;
1505 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1506 STATS_RELEASE_DELAY_NS);
1508 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1514 vec_foreach (r, do_fibs->ip4routes)
1517 const dpo_id_t *dpo_id;
1520 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1521 index = (u32) dpo_id->dpoi_index;
1523 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1526 * If it has actually
1527 * seen at least one packet, send it.
1532 /* already in net byte order */
1533 ctrp->address = r->address.as_u32;
1534 ctrp->address_length = r->address_length;
1535 ctrp->packets = clib_host_to_net_u64 (c.packets);
1536 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1540 if (mp->count == items_this_message)
1542 mp->count = htonl (items_this_message);
1544 * If the main thread's input queue is stuffed,
1545 * drop the data structure lock (which the main thread
1546 * may want), and take a pause.
1549 if (svm_queue_is_full (q))
1552 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1553 svm_queue_unlock (q);
1555 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1556 STATS_RELEASE_DELAY_NS);
1559 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1560 svm_queue_unlock (q);
1562 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1563 mp = vl_msg_api_alloc_as_if_client
1565 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1566 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1568 mp->vrf_id = ntohl (fib->ft_table_id);
1569 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1571 } /* for each (mp or single) adj */
1572 if (sm->data_structure_lock->release_hint)
1574 start_at_fib_index = fib - im4->fibs;
1576 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1578 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1581 } /* vec_foreach (routes) */
1585 /* Flush any data from this fib */
1588 mp->count = htonl (mp->count);
1589 vl_msg_api_send_shmem (q, (u8 *) & mp);
1594 /* If e.g. the last FIB had no reportable routes, free the buffer */
1596 vl_msg_api_free (mp);
/*
 * mfib_table_walk callback: record each visited entry's prefix in
 * sm->do_ip46_fibs.mroutes so the counters can be looked up later,
 * outside the table walk.
 */
1600 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1602 stats_main_t *sm = ctx;
1603 do_ip46_fibs_t *do_fibs;
1604 mfib_entry_t *entry;
1606 do_fibs = &sm->do_ip46_fibs;
1607 entry = mfib_entry_get (fei);
1609 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/*
 * Collect and ship IP4 multicast-FIB counters.  Each mfib is walked with
 * table updates blocked to snapshot its prefixes; every prefix is then
 * re-looked-up (the collection may suspend, so indices can go stale) and
 * entries whose replicate counter shows traffic are batched into
 * VL_API_VNET_IP4_MFIB_COUNTERS messages.
 */
1615 do_ip4_mfib_counters (stats_main_t * sm)
1617 ip4_main_t *im4 = &ip4_main;
1618 api_main_t *am = sm->api_main;
1619 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1620 svm_queue_t *q = shmem_hdr->vl_input_queue;
1623 do_ip46_fibs_t *do_fibs;
1624 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1625 u32 items_this_message;
1626 vl_api_ip4_mfib_counter_t *ctrp = 0;
1627 u32 start_at_mfib_index = 0;
1630 do_fibs = &sm->do_ip46_fibs;
1632 vec_reset_length (do_fibs->mfibs);
1634 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1637 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1639 mfib = do_fibs->mfibs[j];
1640 /* We may have bailed out due to control-plane activity */
1641 while ((mfib - im4->mfibs) < start_at_mfib_index)
1646 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1647 mp = vl_msg_api_alloc_as_if_client
1649 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1650 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1652 mp->vrf_id = ntohl (mfib->mft_table_id);
1653 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1657 /* happens if the last MFIB was empty... */
1658 ASSERT (mp->count == 0);
1659 mp->vrf_id = ntohl (mfib->mft_table_id);
1662 vec_reset_length (do_fibs->mroutes);
1665 * walk the table with table updates blocked
1667 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1669 mfib_table_walk (mfib->mft_index,
1670 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1673 vec_foreach (pfx, do_fibs->mroutes)
1675 const dpo_id_t *dpo_id;
1676 fib_node_index_t mfei;
1681 * re-lookup the entry, since we suspend during the collection
1683 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1685 if (FIB_NODE_INDEX_INVALID == mfei)
1688 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1689 index = (u32) dpo_id->dpoi_index;
1691 vlib_get_combined_counter (&replicate_main.repm_counters,
1692 dpo_id->dpoi_index, &c);
1694 * If it has seen at least one packet, send it.
1698 /* already in net byte order */
1699 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1700 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1701 ctrp->group_length = pfx->fp_len;
1702 ctrp->packets = clib_host_to_net_u64 (c.packets);
1703 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1707 if (mp->count == items_this_message)
1709 mp->count = htonl (items_this_message);
1711 * If the main thread's input queue is stuffed,
1712 * drop the data structure lock (which the main thread
1713 * may want), and take a pause.
1717 while (svm_queue_is_full (q))
1719 svm_queue_unlock (q);
1720 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1721 STATS_RELEASE_DELAY_NS);
1724 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1725 svm_queue_unlock (q);
1727 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1728 mp = vl_msg_api_alloc_as_if_client
1730 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1731 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1733 mp->vrf_id = ntohl (mfib->mft_table_id);
1734 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1739 /* Flush any data from this mfib */
1742 mp->count = htonl (mp->count);
1743 vl_msg_api_send_shmem (q, (u8 *) & mp);
1748 /* If e.g. the last FIB had no reportable routes, free the buffer */
1750 vl_msg_api_free (mp);
/*
 * Collect and ship IP6 multicast-FIB counters.  Same pattern as
 * do_ip4_mfib_counters: snapshot each mfib's prefixes with updates
 * blocked, re-lookup each prefix (the scan may suspend), and batch
 * replicate counters with traffic into VL_API_VNET_IP6_MFIB_COUNTERS
 * messages.
 */
1754 do_ip6_mfib_counters (stats_main_t * sm)
1756 ip6_main_t *im6 = &ip6_main;
1757 api_main_t *am = sm->api_main;
1758 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1759 svm_queue_t *q = shmem_hdr->vl_input_queue;
1762 do_ip46_fibs_t *do_fibs;
1763 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1764 u32 items_this_message;
1765 vl_api_ip6_mfib_counter_t *ctrp = 0;
1766 u32 start_at_mfib_index = 0;
1769 do_fibs = &sm->do_ip46_fibs;
1771 vec_reset_length (do_fibs->mfibs);
1773 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1776 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1778 mfib = do_fibs->mfibs[j];
1779 /* We may have bailed out due to control-plane activity */
1780 while ((mfib - im6->mfibs) < start_at_mfib_index)
1785 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1786 mp = vl_msg_api_alloc_as_if_client
1788 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1789 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1791 mp->vrf_id = ntohl (mfib->mft_table_id);
1792 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1796 /* happens if the last MFIB was empty... */
1797 ASSERT (mp->count == 0);
1798 mp->vrf_id = ntohl (mfib->mft_table_id);
1801 vec_reset_length (do_fibs->mroutes);
1804 * walk the table with table updates blocked
1806 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1808 mfib_table_walk (mfib->mft_index,
1809 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1812 vec_foreach (pfx, do_fibs->mroutes)
1814 const dpo_id_t *dpo_id;
1815 fib_node_index_t mfei;
1820 * re-lookup the entry, since we suspend during the collection
1822 mfei = mfib_table_lookup (mfib->mft_index, pfx);
1824 if (FIB_NODE_INDEX_INVALID == mfei)
1827 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1828 index = (u32) dpo_id->dpoi_index;
1830 vlib_get_combined_counter (&replicate_main.repm_counters,
1831 dpo_id->dpoi_index, &c);
1833 * If it has seen at least one packet, send it.
1837 /* already in net byte order */
1838 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1839 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1840 ctrp->group_length = pfx->fp_len;
1841 ctrp->packets = clib_host_to_net_u64 (c.packets);
1842 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1846 if (mp->count == items_this_message)
1848 mp->count = htonl (items_this_message);
1850 * If the main thread's input queue is stuffed,
1851 * drop the data structure lock (which the main thread
1852 * may want), and take a pause.
1856 while (svm_queue_is_full (q))
1858 svm_queue_unlock (q);
1859 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1860 STATS_RELEASE_DELAY_NS);
1863 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1864 svm_queue_unlock (q);
1866 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1867 mp = vl_msg_api_alloc_as_if_client
1869 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1870 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1872 mp->vrf_id = ntohl (mfib->mft_table_id);
1873 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1878 /* Flush any data from this mfib */
1881 mp->count = htonl (mp->count);
1882 vl_msg_api_send_shmem (q, (u8 *) & mp);
1887 /* If e.g. the last FIB had no reportable routes, free the buffer */
1889 vl_msg_api_free (mp);
1895 ip6_route_t **routep;
1897 } add_routes_in_fib_arg_t;
/*
 * Bihash walk callback: for key/value pairs belonging to ap->fib_index,
 * append a route record (address, prefix length, load-balance index) to
 * the caller-supplied vector.  Long-jumps back to the collector if the
 * control plane asks for the data-structure lock mid-walk.
 * Key layout (per the extractions below): address in key[0..1],
 * fib index in the high half of key[2], prefix length in its low byte.
 */
1900 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1902 add_routes_in_fib_arg_t *ap = arg;
1903 stats_main_t *sm = ap->sm;
1905 if (sm->data_structure_lock->release_hint)
1906 clib_longjmp (&sm->jmp_buf, 1);
1908 if (kvp->key[2] >> 32 == ap->fib_index)
1910 ip6_address_t *addr;
1912 addr = (ip6_address_t *) kvp;
1913 vec_add2 (*ap->routep, r, 1);
1914 r->address = addr[0];
1915 r->address_length = kvp->key[2] & 0xFF;
1916 r->index = kvp->value;
/*
 * Collect and ship per-route IP6 FIB counters.  Routes are gathered by
 * walking the ip6 forwarding bihash via add_routes_in_fib; clib_setjmp
 * provides the escape path when the control plane requests the lock
 * mid-walk, in which case this FIB is rescanned after a short delay.
 * Non-zero load-balance counters are batched into
 * VL_API_VNET_IP6_FIB_COUNTERS messages.
 */
1921 do_ip6_fib_counters (stats_main_t * sm)
1923 ip6_main_t *im6 = &ip6_main;
1924 api_main_t *am = sm->api_main;
1925 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1926 svm_queue_t *q = shmem_hdr->vl_input_queue;
1929 do_ip46_fibs_t *do_fibs;
1930 vl_api_vnet_ip6_fib_counters_t *mp = 0;
1931 u32 items_this_message;
1932 vl_api_ip6_fib_counter_t *ctrp = 0;
1933 u32 start_at_fib_index = 0;
1934 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1935 add_routes_in_fib_arg_t _a, *a = &_a;
1938 do_fibs = &sm->do_ip46_fibs;
1940 vec_reset_length (do_fibs->fibs);
1942 pool_foreach (fib, im6->fibs,
1943 ({vec_add1(do_fibs->fibs,fib);}));
1947 for (i = 0; i < vec_len (do_fibs->fibs); i++)
1949 fib = do_fibs->fibs[i];
1950 /* We may have bailed out due to control-plane activity */
1951 while ((fib - im6->fibs) < start_at_fib_index)
1956 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1957 mp = vl_msg_api_alloc_as_if_client
1959 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1960 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1962 mp->vrf_id = ntohl (fib->ft_table_id);
1963 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1966 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1968 vec_reset_length (do_fibs->ip6routes);
1969 vec_reset_length (do_fibs->results);
1971 a->fib_index = fib - im6->fibs;
1972 a->routep = &do_fibs->ip6routes;
/* setjmp == 0: first pass; add_routes_in_fib longjmps back here (!= 0)
 * if the walk has to be abandoned for the control plane */
1975 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1977 start_at_fib_index = fib - im6->fibs;
1978 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
1983 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1984 STATS_RELEASE_DELAY_NS);
1986 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1990 vec_foreach (r, do_fibs->ip6routes)
1994 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1997 * If it has actually
1998 * seen at least one packet, send it.
2002 /* already in net byte order */
2003 ctrp->address[0] = r->address.as_u64[0];
2004 ctrp->address[1] = r->address.as_u64[1];
2005 ctrp->address_length = (u8) r->address_length;
2006 ctrp->packets = clib_host_to_net_u64 (c.packets);
2007 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2011 if (mp->count == items_this_message)
2013 mp->count = htonl (items_this_message);
2015 * If the main thread's input queue is stuffed,
2016 * drop the data structure lock (which the main thread
2017 * may want), and take a pause.
2020 if (svm_queue_is_full (q))
2023 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2024 svm_queue_unlock (q);
2026 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2027 STATS_RELEASE_DELAY_NS);
2030 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2031 svm_queue_unlock (q);
2033 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2034 mp = vl_msg_api_alloc_as_if_client
2036 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2037 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2039 mp->vrf_id = ntohl (fib->ft_table_id);
2040 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2044 if (sm->data_structure_lock->release_hint)
2046 start_at_fib_index = fib - im6->fibs;
2048 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2050 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2053 } /* vec_foreach (routes) */
2057 /* Flush any data from this fib */
2060 mp->count = htonl (mp->count);
2061 vl_msg_api_send_shmem (q, (u8 *) & mp);
2066 /* If e.g. the last FIB had no reportable routes, free the buffer */
2068 vl_msg_api_free (mp);
/*
 * Entry point of the dedicated stats pthread.  Blocks all signals, names
 * the thread, switches to its own heap, then loops: sleep the poll
 * interval, and for each stat type that has registered clients run the
 * corresponding collector.
 */
2072 stats_thread_fn (void *arg)
2074 stats_main_t *sm = &stats_main;
2075 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2076 vlib_thread_main_t *tm = vlib_get_thread_main ();
2078 /* stats thread wants no signals. */
2082 pthread_sigmask (SIG_SETMASK, &s, 0);
2085 if (vec_len (tm->thread_prefix))
2086 vlib_set_thread_name ((char *)
2087 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2089 clib_mem_set_heap (w->thread_mheap);
2093 /* 10 second poll interval */
2094 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
/* skip all collection while the poller is globally disabled */
2096 if (!(sm->enable_poller))
2101 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2102 do_combined_per_interface_counters (sm);
2105 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2106 do_simple_per_interface_counters (sm);
2108 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2109 do_ip4_fib_counters (sm);
2111 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2112 do_ip6_fib_counters (sm);
2114 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2115 do_ip4_mfib_counters (sm);
2117 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2118 do_ip6_mfib_counters (sm);
2120 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2121 do_ip4_nbr_counters (sm);
2123 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2124 do_ip6_nbr_counters (sm);
/*
 * Relay an interface simple-counters message to every client registered
 * for per-interface simple stats.  The reg_prev pattern lets each client
 * but the last receive a freshly-allocated copy while the final client is
 * sent the original; dead registrations are purged along the way.  If no
 * client could take the message it is freed (after an optional debug
 * dump to stdout).
 */
2129 vl_api_vnet_interface_simple_counters_t_handler
2130 (vl_api_vnet_interface_simple_counters_t * mp)
2132 vpe_client_registration_t *clients, client;
2133 stats_main_t *sm = &stats_main;
2134 vl_api_registration_t *reg, *reg_prev = NULL;
2135 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
2139 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2142 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2143 ~0 /*flag for all */ );
2145 for (i = 0; i < vec_len (clients); i++)
2147 client = clients[i];
2148 reg = vl_api_client_index_to_registration (client.client_index);
2151 if (reg_prev && vl_api_can_send_msg (reg_prev))
2153 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2154 clib_memcpy (mp_copy, mp, mp_size);
2155 vl_api_send_msg (reg_prev, (u8 *) mp);
2163 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2164 client.client_index);
2170 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2173 if (reg_prev && vl_api_can_send_msg (reg_prev))
2175 vl_api_send_msg (reg_prev, (u8 *) mp);
2179 vl_msg_api_free (mp);
/*
 * Relay VL_API_VNET_IP4_FIB_COUNTERS to all clients registered for IP4
 * FIB stats: copies for all but the last client, original to the last.
 * enable_poller is refreshed from clear_client_for_stat when a dead
 * registration is purged; the message is freed if nobody takes it.
 */
2184 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2186 stats_main_t *sm = &stats_main;
2187 vl_api_registration_t *reg, *reg_prev = NULL;
2188 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2190 vpe_client_registration_t *clients, client;
2193 mp_size = sizeof (*mp_copy) +
2194 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2197 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2199 for (i = 0; i < vec_len (clients); i++)
2201 client = clients[i];
2202 reg = vl_api_client_index_to_registration (client.client_index);
2205 if (reg_prev && vl_api_can_send_msg (reg_prev))
2207 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2208 clib_memcpy (mp_copy, mp, mp_size);
2209 vl_api_send_msg (reg_prev, (u8 *) mp);
2216 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2217 ~0, client.client_index);
2222 if (reg_prev && vl_api_can_send_msg (reg_prev))
2224 vl_api_send_msg (reg_prev, (u8 *) mp);
2228 vl_msg_api_free (mp);
/*
 * Relay VL_API_VNET_IP4_NBR_COUNTERS to all clients registered for IP4
 * neighbour stats.  Same copy-to-all-but-last relay pattern as the other
 * counter handlers; frees the message if no client can take it.
 */
2233 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2235 stats_main_t *sm = &stats_main;
2236 vl_api_registration_t *reg, *reg_prev = NULL;
2237 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2239 vpe_client_registration_t *clients, client;
2242 mp_size = sizeof (*mp_copy) +
2243 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2246 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2248 for (i = 0; i < vec_len (clients); i++)
2250 client = clients[i];
2251 reg = vl_api_client_index_to_registration (client.client_index);
2254 if (reg_prev && vl_api_can_send_msg (reg_prev))
2256 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2257 clib_memcpy (mp_copy, mp, mp_size);
2258 vl_api_send_msg (reg_prev, (u8 *) mp);
2265 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2266 ~0, client.client_index);
2272 if (reg_prev && vl_api_can_send_msg (reg_prev))
2274 vl_api_send_msg (reg_prev, (u8 *) mp);
2278 vl_msg_api_free (mp);
/*
 * Relay VL_API_VNET_IP6_FIB_COUNTERS to all clients registered for IP6
 * FIB stats.  Same copy-to-all-but-last relay pattern as the other
 * counter handlers; frees the message if no client can take it.
 */
2283 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2285 stats_main_t *sm = &stats_main;
2286 vl_api_registration_t *reg, *reg_prev = NULL;
2287 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2289 vpe_client_registration_t *clients, client;
2292 mp_size = sizeof (*mp_copy) +
2293 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2296 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2298 for (i = 0; i < vec_len (clients); i++)
2300 client = clients[i];
2301 reg = vl_api_client_index_to_registration (client.client_index);
2304 if (reg_prev && vl_api_can_send_msg (reg_prev))
2306 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2307 clib_memcpy (mp_copy, mp, mp_size);
2308 vl_api_send_msg (reg_prev, (u8 *) mp);
2315 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2316 ~0, client.client_index);
2321 if (reg_prev && vl_api_can_send_msg (reg_prev))
2323 vl_api_send_msg (reg_prev, (u8 *) mp);
2327 vl_msg_api_free (mp);
/*
 * Relay VL_API_VNET_IP6_NBR_COUNTERS to all clients registered for IP6
 * neighbour stats.  Same copy-to-all-but-last relay pattern as the other
 * counter handlers; frees the message if no client can take it.
 */
2332 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2334 stats_main_t *sm = &stats_main;
2335 vl_api_registration_t *reg, *reg_prev = NULL;
2336 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2338 vpe_client_registration_t *clients, client;
2341 mp_size = sizeof (*mp_copy) +
2342 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2345 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2347 for (i = 0; i < vec_len (clients); i++)
2349 client = clients[i];
2350 reg = vl_api_client_index_to_registration (client.client_index);
2353 if (reg_prev && vl_api_can_send_msg (reg_prev))
2355 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2356 clib_memcpy (mp_copy, mp, mp_size);
2357 vl_api_send_msg (reg_prev, (u8 *) mp);
2364 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2365 ~0, client.client_index);
2370 if (reg_prev && vl_api_can_send_msg (reg_prev))
2372 vl_api_send_msg (reg_prev, (u8 *) mp);
2376 vl_msg_api_free (mp);
/*
 * Legacy "want all stats" subscription: registers (or deregisters) the
 * client for every per-interface / FIB / neighbour stat type with item
 * ~0 ("all the things"), then sends a WANT_STATS reply.
 */
2381 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2383 stats_main_t *sm = &stats_main;
2384 vpe_client_registration_t rp;
2385 vl_api_want_stats_reply_t *rmp;
2389 vl_api_registration_t *reg;
2391 item = ~0; //"ALL THE THINGS IN THE THINGS
2392 rp.client_index = mp->client_index;
2393 rp.client_pid = mp->pid;
2395 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2396 item, mp->enable_disable);
2398 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2399 item, mp->enable_disable);
2401 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2402 item, mp->enable_disable);
2404 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2405 item, mp->enable_disable);
2407 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2408 item, mp->enable_disable);
2410 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2411 item, mp->enable_disable);
2414 reg = vl_api_client_index_to_registration (mp->client_index);
2418 rmp = vl_msg_api_alloc (sizeof (*rmp));
2419 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2420 rmp->context = mp->context;
2421 rmp->retval = retval;
2423 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Enable/disable a client's subscription to per-interface simple stats
 * (item ~0 = all interfaces) and send the corresponding reply.
 */
2427 vl_api_want_interface_simple_stats_t_handler
2428 (vl_api_want_interface_simple_stats_t * mp)
2430 stats_main_t *sm = &stats_main;
2431 vpe_client_registration_t rp;
2432 vl_api_want_interface_simple_stats_reply_t *rmp;
2436 vl_api_registration_t *reg;
2438 swif = ~0; //Using same mechanism as _per_interface_
2439 rp.client_index = mp->client_index;
2440 rp.client_pid = mp->pid;
2442 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2443 mp->enable_disable);
2446 reg = vl_api_client_index_to_registration (mp->client_index);
/* client vanished: drop its registration instead of replying */
2451 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2456 rmp = vl_msg_api_alloc (sizeof (*rmp));
2457 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2458 rmp->context = mp->context;
2459 rmp->retval = retval;
2461 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Enable/disable a client's subscription to IP4 FIB counters (fib ~0 =
 * all tables) and send a WANT_IP4_FIB_STATS reply.
 */
2466 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2468 stats_main_t *sm = &stats_main;
2469 vpe_client_registration_t rp;
2470 vl_api_want_ip4_fib_stats_reply_t *rmp;
2473 vl_api_registration_t *reg;
2476 fib = ~0; //Using same mechanism as _per_interface_
2477 rp.client_index = mp->client_index;
2478 rp.client_pid = mp->pid;
2480 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2481 mp->enable_disable);
2484 reg = vl_api_client_index_to_registration (mp->client_index);
2488 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2489 fib, mp->client_index);
2493 rmp = vl_msg_api_alloc (sizeof (*rmp));
2494 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2495 rmp->context = mp->context;
2496 rmp->retval = retval;
2498 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Enable/disable a client's subscription to IP4 multicast-FIB counters
 * (mfib ~0 = all tables) and send a WANT_IP4_MFIB_STATS reply.
 */
2502 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2504 stats_main_t *sm = &stats_main;
2505 vpe_client_registration_t rp;
2506 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2509 vl_api_registration_t *reg;
2512 mfib = ~0; //Using same mechanism as _per_interface_
2513 rp.client_index = mp->client_index;
2514 rp.client_pid = mp->pid;
2516 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2517 mp->enable_disable);
2520 reg = vl_api_client_index_to_registration (mp->client_index);
2523 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2524 mfib, mp->client_index);
2528 rmp = vl_msg_api_alloc (sizeof (*rmp));
2529 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2530 rmp->context = mp->context;
2531 rmp->retval = retval;
2533 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Enable/disable a client's subscription to IP6 FIB counters (fib ~0 =
 * all tables) and send a WANT_IP6_FIB_STATS reply.
 */
2537 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2539 stats_main_t *sm = &stats_main;
2540 vpe_client_registration_t rp;
/* Fix: the reply pointer was declared with the ip4 reply type
 * (vl_api_want_ip4_fib_stats_reply_t); the allocation below sizes the
 * message from sizeof (*rmp), so it must be the ip6 reply struct to
 * match the VL_API_WANT_IP6_FIB_STATS_REPLY id we send. */
2541 vl_api_want_ip6_fib_stats_reply_t *rmp;
2544 vl_api_registration_t *reg;
2547 fib = ~0; //Using same mechanism as _per_interface_
2548 rp.client_index = mp->client_index;
2549 rp.client_pid = mp->pid;
2551 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2552 mp->enable_disable);
2555 reg = vl_api_client_index_to_registration (mp->client_index);
2558 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2559 fib, mp->client_index);
2563 rmp = vl_msg_api_alloc (sizeof (*rmp));
2564 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2565 rmp->context = mp->context;
2566 rmp->retval = retval;
2568 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * Enable/disable a client's subscription to IP6 multicast-FIB counters
 * (mfib ~0 = all tables) and send a WANT_IP6_MFIB_STATS reply.
 */
2572 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2574 stats_main_t *sm = &stats_main;
2575 vpe_client_registration_t rp;
/* Fix: the reply pointer was declared with the ip4 reply type
 * (vl_api_want_ip4_mfib_stats_reply_t); the allocation below sizes the
 * message from sizeof (*rmp), so it must be the ip6 reply struct to
 * match the VL_API_WANT_IP6_MFIB_STATS_REPLY id we send. */
2576 vl_api_want_ip6_mfib_stats_reply_t *rmp;
2579 vl_api_registration_t *reg;
2582 mfib = ~0; //Using same mechanism as _per_interface_
2583 rp.client_index = mp->client_index;
2584 rp.client_pid = mp->pid;
2586 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2587 mp->enable_disable);
2590 reg = vl_api_client_index_to_registration (mp->client_index);
2593 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2594 mfib, mp->client_index);
2598 rmp = vl_msg_api_alloc (sizeof (*rmp));
2599 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2600 rmp->context = mp->context;
2601 rmp->retval = retval;
2603 vl_api_send_msg (reg, (u8 *) rmp);
2606 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/* NOTE(review): per the FIXME above, these two handlers are inert stubs
 * until neighbour stats are repaired. */
2608 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
2613 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/*
 * Synchronous summary-stats request: under the interface-counter lock,
 * sum RX and TX packet/byte totals over every combined SW interface
 * counter, then reply with the byte-swapped totals and the last vector
 * rate.
 */
2618 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2620 stats_main_t *sm = &stats_main;
2621 vnet_interface_main_t *im = sm->interface_main;
2622 vl_api_vnet_get_summary_stats_reply_t *rmp;
2623 vlib_combined_counter_main_t *cm;
2626 u64 total_pkts[VLIB_N_RX_TX];
2627 u64 total_bytes[VLIB_N_RX_TX];
2628 vl_api_registration_t *reg;
2630 reg = vl_api_client_index_to_registration (mp->client_index);
2634 rmp = vl_msg_api_alloc (sizeof (*rmp));
2635 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2636 rmp->context = mp->context;
2639 memset (total_pkts, 0, sizeof (total_pkts));
2640 memset (total_bytes, 0, sizeof (total_bytes));
2642 vnet_interface_counter_lock (im);
2644 vec_foreach (cm, im->combined_sw_if_counters)
/* counter-main position in the vector encodes RX vs TX */
2646 which = cm - im->combined_sw_if_counters;
2648 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2650 vlib_get_combined_counter (cm, i, &v);
2651 total_pkts[which] += v.packets;
2652 total_bytes[which] += v.bytes;
2655 vnet_interface_counter_unlock (im);
2657 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2658 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2659 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2660 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
2662 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2664 vl_api_send_msg (reg, (u8 *) rmp);
/*
 * API-client delete callback.  Cleanup of the departing client's stat
 * registrations is currently disabled — the intended pool/hash removal
 * is left commented out below.
 */
2668 stats_memclnt_delete_callback (u32 client_index)
2670 vpe_client_stats_registration_t *rp;
2671 stats_main_t *sm = &stats_main;
2675 /* p = hash_get (sm->stats_registration_hash, client_index); */
2678 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2679 /* pool_put (sm->stats_registrations, rp); */
2680 /* hash_unset (sm->stats_registration_hash, client_index); */
/*
 * Counter messages are relayed to clients as-is; suppress the generated
 * endian-swap and print handlers for them.
 */
2686 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
2687 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
2688 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
2689 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
2690 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
2691 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
2692 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
2693 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
2694 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
2695 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
2696 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
2697 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/*
 * VLIB init function: allocate and zero the data-structure lock, install
 * the stats API message handlers, mark bulk counter messages as bounced
 * (not auto-freed by the API infra), build the (name, crc, id) message
 * table, and initialize the per-stat registration pools and hashes.
 */
2699 static clib_error_t *
2700 stats_init (vlib_main_t * vm)
2702 stats_main_t *sm = &stats_main;
2703 api_main_t *am = &api_main;
2704 void *vlib_worker_thread_bootstrap_fn (void *arg);
2707 sm->vnet_main = vnet_get_main ();
2708 sm->interface_main = &vnet_get_main ()->interface_main;
2710 sm->stats_poll_interval_in_seconds = 10;
2711 sm->data_structure_lock =
2712 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
2713 CLIB_CACHE_LINE_BYTES);
2714 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
2717 vl_msg_api_set_handlers(VL_API_##N, #n, \
2718 vl_api_##n##_t_handler, \
2720 vl_api_##n##_t_endian, \
2721 vl_api_##n##_t_print, \
2722 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
2726 /* tell the msg infra not to free these messages... */
2727 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
2728 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
2729 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
2730 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
2731 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
2732 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
2735 * Set up the (msg_name, crc, message-id) table
2737 setup_message_id_table (am);
2739 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
2740 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
2741 #define stats_reg(n) \
2742 sm->stats_registrations[IDX_##n] = 0; \
2743 sm->stats_registration_hash[IDX_##n] = 0;
2744 #include <vpp/stats/stats.reg>
2750 VLIB_INIT_FUNCTION (stats_init);
2753 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
2755 .function = stats_thread_fn,
2758 .no_data_structure_clone = 1,
2764 * fd.io coding-style-patch-verification: ON
2767 * eval: (c-set-style "gnu")