/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
24 stats_main_t stats_main;
26 #include <vnet/ip/ip.h>
28 #include <vpp/api/vpe_msg_enum.h>
31 #define f64_print(a,b)
33 #define vl_typedefs /* define message structures */
34 #include <vpp/api/vpe_all_api_h.h>
37 #define vl_endianfun /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
41 /* instantiate all the print functions we know about */
42 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
44 #include <vpp/api/vpe_all_api_h.h>
/*
 * Table of API messages handled by the stats process:
 * _(MESSAGE_ID, handler_suffix). Expanded to build handler registrations.
 */
#define foreach_stats_msg                                               \
_(WANT_STATS, want_stats)                                               \
_(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters)       \
_(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats)             \
_(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters)   \
_(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats)         \
_(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
_(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats)     \
_(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters)                         \
_(WANT_IP4_FIB_STATS, want_ip4_fib_stats)                               \
_(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)                         \
_(WANT_IP6_FIB_STATS, want_ip6_fib_stats)                               \
_(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats)                             \
_(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats)                             \
_(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters)                         \
_(WANT_IP4_NBR_STATS, want_ip4_nbr_stats)                               \
_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)                         \
_(WANT_IP6_NBR_STATS, want_ip6_nbr_stats)                               \
_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
68 #define vl_msg_name_crc_list
69 #include <vpp/stats/stats.api.h>
70 #undef vl_msg_name_crc_list
73 setup_message_id_table (api_main_t * am)
76 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
77 foreach_vl_msg_name_crc_stats;
/* These constants ensure msg sizes <= 1024, aka ring allocation */
#define SIMPLE_COUNTER_BATCH_SIZE	126
#define COMBINED_COUNTER_BATCH_SIZE	63
#define IP4_FIB_COUNTER_BATCH_SIZE	48
#define IP6_FIB_COUNTER_BATCH_SIZE	30
#define IP4_MFIB_COUNTER_BATCH_SIZE	24
#define IP6_MFIB_COUNTER_BATCH_SIZE	15

/* 5ms */
#define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
94 format_vnet_interface_combined_counters (u8 * s, va_list * args)
96 stats_main_t *sm = &stats_main;
97 vl_api_vnet_interface_combined_counters_t *mp =
98 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
101 u32 count, sw_if_index;
103 count = ntohl (mp->count);
104 sw_if_index = ntohl (mp->first_sw_if_index);
108 vp = (vlib_counter_t *) mp->data;
110 switch (mp->vnet_counter_type)
112 case VNET_INTERFACE_COUNTER_RX:
115 case VNET_INTERFACE_COUNTER_TX:
119 counter_name = "bogus";
122 for (i = 0; i < count; i++)
124 packets = clib_mem_unaligned (&vp->packets, u64);
125 packets = clib_net_to_host_u64 (packets);
126 bytes = clib_mem_unaligned (&vp->bytes, u64);
127 bytes = clib_net_to_host_u64 (bytes);
129 s = format (s, "%U.%s.packets %lld\n",
130 format_vnet_sw_if_index_name,
131 sm->vnet_main, sw_if_index, counter_name, packets);
132 s = format (s, "%U.%s.bytes %lld\n",
133 format_vnet_sw_if_index_name,
134 sm->vnet_main, sw_if_index, counter_name, bytes);
141 format_vnet_interface_simple_counters (u8 * s, va_list * args)
143 stats_main_t *sm = &stats_main;
144 vl_api_vnet_interface_simple_counters_t *mp =
145 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
147 u32 count, sw_if_index;
148 count = ntohl (mp->count);
149 sw_if_index = ntohl (mp->first_sw_if_index);
151 vp = (u64 *) mp->data;
154 switch (mp->vnet_counter_type)
156 case VNET_INTERFACE_COUNTER_DROP:
157 counter_name = "drop";
159 case VNET_INTERFACE_COUNTER_PUNT:
160 counter_name = "punt";
162 case VNET_INTERFACE_COUNTER_IP4:
163 counter_name = "ip4";
165 case VNET_INTERFACE_COUNTER_IP6:
166 counter_name = "ip6";
168 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
169 counter_name = "rx-no-buff";
171 case VNET_INTERFACE_COUNTER_RX_MISS:
172 counter_name = "rx-miss";
174 case VNET_INTERFACE_COUNTER_RX_ERROR:
175 counter_name = "rx-error (fifo-full)";
177 case VNET_INTERFACE_COUNTER_TX_ERROR:
178 counter_name = "tx-error (fifo-full)";
181 counter_name = "bogus";
184 for (i = 0; i < count; i++)
186 v = clib_mem_unaligned (vp, u64);
187 v = clib_net_to_host_u64 (v);
189 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
190 sm->vnet_main, sw_if_index, counter_name, v);
198 dslock (stats_main_t * sm, int release_hint, int tag)
201 data_structure_lock_t *l = sm->data_structure_lock;
203 if (PREDICT_FALSE (l == 0))
206 thread_index = vlib_get_thread_index ();
207 if (l->lock && l->thread_index == thread_index)
216 while (__sync_lock_test_and_set (&l->lock, 1))
219 l->thread_index = thread_index;
224 stats_dslock_with_hint (int hint, int tag)
226 stats_main_t *sm = &stats_main;
227 dslock (sm, hint, tag);
231 dsunlock (stats_main_t * sm)
234 data_structure_lock_t *l = sm->data_structure_lock;
236 if (PREDICT_FALSE (l == 0))
239 thread_index = vlib_get_thread_index ();
240 ASSERT (l->lock && l->thread_index == thread_index);
246 CLIB_MEMORY_BARRIER ();
252 stats_dsunlock (int hint, int tag)
254 stats_main_t *sm = &stats_main;
258 static vpe_client_registration_t *
259 get_client_for_stat (u32 reg, u32 item, u32 client_index)
261 stats_main_t *sm = &stats_main;
262 vpe_client_stats_registration_t *registration;
265 /* Is there anything listening for item in that reg */
266 p = hash_get (sm->stats_registration_hash[reg], item);
271 /* If there is, is our client_index one of them */
272 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
273 p = hash_get (registration->client_hash, client_index);
278 return pool_elt_at_index (registration->clients, p[0]);
283 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
285 stats_main_t *sm = &stats_main;
286 vpe_client_stats_registration_t *registration;
287 vpe_client_registration_t *cr;
290 /* Is there anything listening for item in that reg */
291 p = hash_get (sm->stats_registration_hash[reg], item);
295 pool_get (sm->stats_registrations[reg], registration);
296 registration->item = item;
297 hash_set (sm->stats_registration_hash[reg], item,
298 registration - sm->stats_registrations[reg]);
302 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
305 p = hash_get (registration->client_hash, client->client_index);
309 pool_get (registration->clients, cr);
310 cr->client_index = client->client_index;
311 cr->client_pid = client->client_pid;
312 hash_set (registration->client_hash, cr->client_index,
313 cr - registration->clients);
316 return 1; //At least one client is doing something ... poll
320 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
322 stats_main_t *sm = &stats_main;
323 vpe_client_stats_registration_t *registration;
324 vpe_client_registration_t *client;
328 /* Clear the client first */
329 /* Is there anything listening for item in that reg */
330 p = hash_get (sm->stats_registration_hash[reg], item);
335 /* If there is, is our client_index one of them */
336 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
337 p = hash_get (registration->client_hash, client_index);
342 client = pool_elt_at_index (registration->clients, p[0]);
343 hash_unset (registration->client_hash, client->client_index);
344 pool_put (registration->clients, client);
346 /* Now check if that was the last client for that item */
347 if (0 == pool_elts (registration->clients))
349 hash_unset (sm->stats_registration_hash[reg], item);
350 pool_put (sm->stats_registrations[reg], registration);
355 /* Now check if that was the last item in any of the listened to stats */
356 for (i = 0; i < STATS_REG_N_IDX; i++)
358 elts += pool_elts (sm->stats_registrations[i]);
363 vpe_client_registration_t *
364 get_clients_for_stat (u32 reg, u32 item)
366 stats_main_t *sm = &stats_main;
367 vpe_client_registration_t *client, *clients = 0;
368 vpe_client_stats_registration_t *registration;
371 /* Is there anything listening for item in that reg */
372 p = hash_get (sm->stats_registration_hash[reg], item);
377 /* If there is, is our client_index one of them */
378 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
380 vec_reset_length (clients);
381 pool_foreach (client, registration->clients, (
383 vec_add1 (clients, *client);}
390 clear_client_reg (u32 ** registrations)
392 /* When registrations[x] is a vector of pool indices
393 here is a good place to clean up the pools
395 #define stats_reg(n) vec_free(registrations[IDX_##n]);
396 #include <vpp/stats/stats.reg>
399 vec_free (registrations);
403 init_client_reg (u32 ** registrations)
407 Initialise the stats registrations for each
408 type of stat a client can register for as well as
409 a vector of "interested" indexes.
410 Initially this is a u32 of either sw_if_index or fib_index
411 but eventually this should migrate to a pool_index (u32)
412 with a type specific pool that can include more complex things
413 such as timing and structured events.
415 vec_validate (registrations, STATS_REG_N_IDX);
416 #define stats_reg(n) \
417 vec_reset_length(registrations[IDX_##n]);
418 #include <vpp/stats/stats.reg>
422 When registrations[x] is a vector of pool indices, here
423 is a good place to init the pools.
425 return registrations;
429 enable_all_client_reg (u32 ** registrations)
433 Enable all stats known by adding
434 ~0 to the index vector. Eventually this
435 should be deprecated.
437 #define stats_reg(n) \
438 vec_add1(registrations[IDX_##n], ~0);
439 #include <vpp/stats/stats.reg>
441 return registrations;
445 do_simple_interface_counters (stats_main_t * sm)
447 vl_api_vnet_interface_simple_counters_t *mp = 0;
448 vnet_interface_main_t *im = sm->interface_main;
449 api_main_t *am = sm->api_main;
450 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
451 svm_queue_t *q = shmem_hdr->vl_input_queue;
452 vlib_simple_counter_main_t *cm;
453 u32 items_this_message = 0;
458 * Prevent interface registration from expanding / moving the vectors...
459 * That tends never to happen, so we can hold this lock for a while.
461 vnet_interface_counter_lock (im);
463 vec_foreach (cm, im->sw_if_counters)
465 n_counts = vlib_simple_counter_n_counters (cm);
466 for (i = 0; i < n_counts; i++)
470 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
473 mp = vl_msg_api_alloc_as_if_client
474 (sizeof (*mp) + items_this_message * sizeof (v));
475 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
476 mp->vnet_counter_type = cm - im->sw_if_counters;
477 mp->first_sw_if_index = htonl (i);
479 vp = (u64 *) mp->data;
481 v = vlib_get_simple_counter (cm, i);
482 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
485 if (mp->count == items_this_message)
487 mp->count = htonl (items_this_message);
488 /* Send to the main thread... */
489 vl_msg_api_send_shmem (q, (u8 *) & mp);
495 vnet_interface_counter_unlock (im);
499 handle_client_registration (vpe_client_registration_t * client, u32 stat,
500 u32 item, int enable_disable)
502 stats_main_t *sm = &stats_main;
503 vpe_client_registration_t *rp, _rp;
505 rp = get_client_for_stat (stat, item, client->client_index);
508 if (enable_disable == 0)
510 if (!rp) // No client to disable
512 clib_warning ("pid %d: already disabled for stats...",
517 clear_client_for_stat (stat, item, client->client_index);
524 rp->client_index = client->client_index;
525 rp->client_pid = client->client_pid;
526 sm->enable_poller = set_client_for_stat (stat, item, rp);
531 /**********************************
532 * ALL Interface Combined stats - to be deprecated
533 **********************************/
536 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
539 vl_api_want_interface_combined_stats_t_handler
540 (vl_api_want_interface_combined_stats_t * mp)
542 stats_main_t *sm = &stats_main;
543 vpe_client_registration_t rp;
544 vl_api_want_interface_combined_stats_reply_t *rmp;
550 swif = ~0; //Using same mechanism as _per_interface_
551 rp.client_index = mp->client_index;
552 rp.client_pid = mp->pid;
554 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
558 q = vl_api_client_index_to_input_queue (mp->client_index);
563 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
568 rmp = vl_msg_api_alloc (sizeof (*rmp));
569 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
570 rmp->context = mp->context;
571 rmp->retval = retval;
573 vl_msg_api_send_shmem (q, (u8 *) & rmp);
577 vl_api_vnet_interface_combined_counters_t_handler
578 (vl_api_vnet_interface_combined_counters_t * mp)
580 vpe_client_registration_t *clients, client;
581 stats_main_t *sm = &stats_main;
582 svm_queue_t *q, *q_prev = NULL;
583 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
587 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
590 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
591 ~0 /*flag for all */ );
593 for (i = 0; i < vec_len (clients); i++)
596 q = vl_api_client_index_to_input_queue (client.client_index);
599 if (q_prev && (q_prev->cursize < q_prev->maxsize))
601 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
602 clib_memcpy (mp_copy, mp, mp_size);
603 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
610 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
613 if (q_prev && (q_prev->cursize < q_prev->maxsize))
615 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
619 vl_msg_api_free (mp);
624 do_combined_interface_counters (stats_main_t * sm)
626 vl_api_vnet_interface_combined_counters_t *mp = 0;
627 vnet_interface_main_t *im = sm->interface_main;
628 api_main_t *am = sm->api_main;
629 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
630 svm_queue_t *q = shmem_hdr->vl_input_queue;
631 vlib_combined_counter_main_t *cm;
632 u32 items_this_message = 0;
633 vlib_counter_t v, *vp = 0;
636 vnet_interface_counter_lock (im);
638 vec_foreach (cm, im->combined_sw_if_counters)
640 n_counts = vlib_combined_counter_n_counters (cm);
641 for (i = 0; i < n_counts; i++)
645 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
648 mp = vl_msg_api_alloc_as_if_client
649 (sizeof (*mp) + items_this_message * sizeof (v));
650 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
651 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
652 mp->first_sw_if_index = htonl (i);
654 vp = (vlib_counter_t *) mp->data;
656 vlib_get_combined_counter (cm, i, &v);
657 clib_mem_unaligned (&vp->packets, u64)
658 = clib_host_to_net_u64 (v.packets);
659 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
662 if (mp->count == items_this_message)
664 mp->count = htonl (items_this_message);
665 /* Send to the main thread... */
666 vl_msg_api_send_shmem (q, (u8 *) & mp);
672 vnet_interface_counter_unlock (im);
675 /**********************************
676 * Per Interface Combined stats
677 **********************************/
679 /* Request from client registering interfaces it wants */
681 vl_api_want_per_interface_combined_stats_t_handler
682 (vl_api_want_per_interface_combined_stats_t * mp)
684 stats_main_t *sm = &stats_main;
685 vpe_client_registration_t rp;
686 vl_api_want_per_interface_combined_stats_reply_t *rmp;
687 vlib_combined_counter_main_t *cm;
694 // Validate we have good sw_if_indexes before registering
695 for (i = 0; i < mp->num; i++)
697 swif = mp->sw_ifs[i];
699 /* Check its a real sw_if_index that the client is allowed to see */
702 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
704 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
710 for (i = 0; i < mp->num; i++)
712 swif = mp->sw_ifs[i];
714 rp.client_index = mp->client_index;
715 rp.client_pid = mp->pid;
716 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
717 swif, mp->enable_disable);
721 q = vl_api_client_index_to_input_queue (mp->client_index);
725 for (i = 0; i < mp->num; i++)
727 swif = mp->sw_ifs[i];
729 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
735 rmp = vl_msg_api_alloc (sizeof (*rmp));
736 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
737 rmp->context = mp->context;
738 rmp->retval = retval;
740 vl_msg_api_send_shmem (q, (u8 *) & rmp);
743 /* Per Interface Combined distribution to client */
745 do_combined_per_interface_counters (stats_main_t * sm)
747 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
748 vnet_interface_main_t *im = sm->interface_main;
749 api_main_t *am = sm->api_main;
750 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
751 svm_queue_t *q = NULL;
752 vlib_combined_counter_main_t *cm;
754 * items_this_message will eventually be used to optimise the batching
755 * of per client messages for each stat. For now setting this to 1 then
756 * iterate. This will not affect API.
758 * FIXME instead of enqueueing here, this should be sent to a batch
759 * storer for per-client transmission. Each "mp" sent would be a single entry
760 * and if a client is listening to other sw_if_indexes for same, it would be
761 * appended to that *mp
763 u32 items_this_message = 1;
764 vnet_combined_counter_t *vp = 0;
768 vpe_client_stats_registration_t *reg;
769 vpe_client_registration_t *client;
770 u32 *sw_if_index = 0;
774 - capturing the timestamp of the counters "when VPP knew them" is important.
775 Less so is that the timing of the delivery to the control plane be in the same
778 i.e. As long as the control plane can delta messages from VPP and work out
779 velocity etc based on the timestamp, it can do so in a more "batch mode".
781 It would be beneficial to keep a "per-client" message queue, and then
782 batch all the stat messages for a client into one message, with
785 Given this particular API is for "per interface" one assumes that the scale
786 is less than the ~0 case, which the prior API is suited for.
788 vnet_interface_counter_lock (im);
790 timestamp = vlib_time_now (sm->vlib_main);
792 vec_reset_length (sm->regs_tmp);
794 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
797 vec_add1 (sm->regs_tmp, reg);}));
799 for (i = 0; i < vec_len (sm->regs_tmp); i++)
801 reg = sm->regs_tmp[i];
804 vnet_interface_counter_unlock (im);
805 do_combined_interface_counters (sm);
806 vnet_interface_counter_lock (im);
809 vec_reset_length (sm->clients_tmp);
810 pool_foreach (client, reg->clients, (
812 vec_add1 (sm->clients_tmp,
816 //FIXME - should be doing non-variant part of mp here and managing
817 // any alloc per client in that vec_foreach
818 for (j = 0; j < vec_len (sm->clients_tmp); j++)
820 client = sm->clients_tmp[j];
821 q = vl_api_client_index_to_input_queue (client->client_index);
823 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
827 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
828 reg->item, client->client_index);
832 mp = vl_msg_api_alloc (sizeof (*mp) +
833 (items_this_message *
834 (sizeof (*vp) /* rx */ )));
836 // FIXME when optimising for items_this_message > 1 need to include a
837 // SIMPLE_INTERFACE_BATCH_SIZE check.
839 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
841 mp->count = items_this_message;
842 mp->timestamp = timestamp;
843 vp = (vnet_combined_counter_t *) mp->data;
845 vp->sw_if_index = htonl (reg->item);
847 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
848 vlib_get_combined_counter (cm, reg->item, &v);
849 clib_mem_unaligned (&vp->rx_packets, u64)
850 = clib_host_to_net_u64 (v.packets);
851 clib_mem_unaligned (&vp->rx_bytes, u64) =
852 clib_host_to_net_u64 (v.bytes);
855 /* TX vlib_counter_t packets/bytes */
856 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
857 vlib_get_combined_counter (cm, reg->item, &v);
858 clib_mem_unaligned (&vp->tx_packets, u64)
859 = clib_host_to_net_u64 (v.packets);
860 clib_mem_unaligned (&vp->tx_bytes, u64) =
861 clib_host_to_net_u64 (v.bytes);
863 vl_msg_api_send_shmem (q, (u8 *) & mp);
867 vnet_interface_counter_unlock (im);
870 /**********************************
871 * Per Interface simple stats
872 **********************************/
874 /* Request from client registering interfaces it wants */
876 vl_api_want_per_interface_simple_stats_t_handler
877 (vl_api_want_per_interface_simple_stats_t * mp)
879 stats_main_t *sm = &stats_main;
880 vpe_client_registration_t rp;
881 vl_api_want_per_interface_simple_stats_reply_t *rmp;
882 vlib_simple_counter_main_t *cm;
889 for (i = 0; i < mp->num; i++)
891 swif = mp->sw_ifs[i];
893 /* Check its a real sw_if_index that the client is allowed to see */
896 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
898 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
904 for (i = 0; i < mp->num; i++)
906 swif = mp->sw_ifs[i];
908 rp.client_index = mp->client_index;
909 rp.client_pid = mp->pid;
910 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
911 swif, mp->enable_disable);
915 q = vl_api_client_index_to_input_queue (mp->client_index);
917 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
920 for (i = 0; i < mp->num; i++)
922 swif = mp->sw_ifs[i];
924 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
932 rmp = vl_msg_api_alloc (sizeof (*rmp));
933 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
934 rmp->context = mp->context;
935 rmp->retval = retval;
937 vl_msg_api_send_shmem (q, (u8 *) & rmp);
940 /* Per Interface Simple distribution to client */
942 do_simple_per_interface_counters (stats_main_t * sm)
944 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
945 vnet_interface_main_t *im = sm->interface_main;
946 api_main_t *am = sm->api_main;
947 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
948 svm_queue_t *q = NULL;
949 vlib_simple_counter_main_t *cm;
951 * items_this_message will eventually be used to optimise the batching
952 * of per client messages for each stat. For now setting this to 1 then
953 * iterate. This will not affect API.
955 * FIXME instead of enqueueing here, this should be sent to a batch
956 * storer for per-client transmission. Each "mp" sent would be a single entry
957 * and if a client is listening to other sw_if_indexes for same, it would be
958 * appended to that *mp
960 u32 items_this_message = 1;
962 vpe_client_stats_registration_t *reg;
963 vpe_client_registration_t *client;
966 vnet_simple_counter_t *vp = 0;
971 - capturing the timestamp of the counters "when VPP knew them" is important.
972 Less so is that the timing of the delivery to the control plane be in the same
975 i.e. As long as the control plane can delta messages from VPP and work out
976 velocity etc based on the timestamp, it can do so in a more "batch mode".
978 It would be beneficial to keep a "per-client" message queue, and then
979 batch all the stat messages for a client into one message, with
982 Given this particular API is for "per interface" one assumes that the scale
983 is less than the ~0 case, which the prior API is suited for.
985 vnet_interface_counter_lock (im);
987 timestamp = vlib_time_now (sm->vlib_main);
989 vec_reset_length (sm->regs_tmp);
991 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], (
997 for (i = 0; i < vec_len (sm->regs_tmp); i++)
999 reg = sm->regs_tmp[i];
1000 if (reg->item == ~0)
1002 vnet_interface_counter_unlock (im);
1003 do_simple_interface_counters (sm);
1004 vnet_interface_counter_lock (im);
1007 vec_reset_length (sm->clients_tmp);
1008 pool_foreach (client, reg->clients, (
1010 vec_add1 (sm->clients_tmp,
1014 //FIXME - should be doing non-variant part of mp here and managing
1015 // any alloc per client in that vec_foreach
1016 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1018 client = sm->clients_tmp[j];
1019 q = vl_api_client_index_to_input_queue (client->client_index);
1021 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
1025 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1026 reg->item, client->client_index);
1030 size = (sizeof (*mp) + (items_this_message * (sizeof (u64) * 10)));
1031 mp = vl_msg_api_alloc (size);
1032 // FIXME when optimising for items_this_message > 1 need to include a
1033 // SIMPLE_INTERFACE_BATCH_SIZE check.
1034 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1036 mp->count = items_this_message;
1037 mp->timestamp = timestamp;
1038 vp = (vnet_simple_counter_t *) mp->data;
1040 vp->sw_if_index = htonl (reg->item);
1042 //FIXME will be simpler with a preprocessor macro
1043 // VNET_INTERFACE_COUNTER_DROP
1044 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1045 v = vlib_get_simple_counter (cm, reg->item);
1046 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1048 // VNET_INTERFACE_COUNTER_PUNT
1049 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1050 v = vlib_get_simple_counter (cm, reg->item);
1051 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1053 // VNET_INTERFACE_COUNTER_IP4
1054 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1055 v = vlib_get_simple_counter (cm, reg->item);
1056 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1058 //VNET_INTERFACE_COUNTER_IP6
1059 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1060 v = vlib_get_simple_counter (cm, reg->item);
1061 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1063 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1064 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1065 v = vlib_get_simple_counter (cm, reg->item);
1066 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1067 clib_host_to_net_u64 (v);
1069 //VNET_INTERFACE_COUNTER_RX_MISS
1070 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1071 v = vlib_get_simple_counter (cm, reg->item);
1072 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1074 //VNET_INTERFACE_COUNTER_RX_ERROR
1075 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1076 v = vlib_get_simple_counter (cm, reg->item);
1077 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1079 //VNET_INTERFACE_COUNTER_TX_ERROR
1080 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1081 v = vlib_get_simple_counter (cm, reg->item);
1082 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1084 //VNET_INTERFACE_COUNTER_MPLS
1085 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1086 v = vlib_get_simple_counter (cm, reg->item);
1087 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1089 vl_msg_api_send_shmem (q, (u8 *) & mp);
1093 vnet_interface_counter_unlock (im);
1096 /**********************************
1098 **********************************/
1101 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1103 struct timespec _req, *req = &_req;
1104 struct timespec _rem, *rem = &_rem;
1107 req->tv_nsec = nsec;
1110 if (nanosleep (req, rem) == 0)
1115 clib_unix_warning ("nanosleep");
1121 * @brief The context passed when collecting adjacency counters
1123 typedef struct ip4_nbr_stats_ctx_t_
1126 * The SW IF index all these adjs belong to
1131 * A vector of ip4 nbr counters
1133 vl_api_ip4_nbr_counter_t *counters;
1134 } ip4_nbr_stats_ctx_t;
1136 static adj_walk_rc_t
1137 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1139 vl_api_ip4_nbr_counter_t *vl_counter;
1140 vlib_counter_t adj_counter;
1141 ip4_nbr_stats_ctx_t *ctx;
1142 ip_adjacency_t *adj;
1145 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
1147 if (0 != adj_counter.packets)
1149 vec_add2 (ctx->counters, vl_counter, 1);
1152 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1153 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1154 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1155 vl_counter->link_type = adj->ia_link;
1157 return (ADJ_WALK_RC_CONTINUE);
1160 #define MIN(x,y) (((x)<(y))?(x):(y))
1163 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1165 api_main_t *am = sm->api_main;
1166 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1167 svm_queue_t *q = shmem_hdr->vl_input_queue;
1168 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1172 * If the walk context has counters, which may be left over from the last
1173 * suspend, then we continue from there.
1175 while (0 != vec_len (ctx->counters))
1177 u32 n_items = MIN (vec_len (ctx->counters),
1178 IP4_FIB_COUNTER_BATCH_SIZE);
1181 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1183 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1186 (vl_api_ip4_nbr_counter_t)));
1187 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1188 mp->count = ntohl (n_items);
1189 mp->sw_if_index = ntohl (ctx->sw_if_index);
1194 * copy the counters from the back of the context, then we can easily
1195 * 'erase' them by resetting the vector length.
1196 * The order we push the stats to the caller is not important.
1199 &ctx->counters[vec_len (ctx->counters) - n_items],
1200 n_items * sizeof (*ctx->counters));
1202 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1208 pause = svm_queue_is_full (q);
1210 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1211 svm_queue_unlock (q);
1215 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1216 STATS_RELEASE_DELAY_NS);
1221 do_ip4_nbr_counters (stats_main_t * sm)
1223 vnet_main_t *vnm = vnet_get_main ();
1224 vnet_interface_main_t *im = &vnm->interface_main;
1225 vnet_sw_interface_t *si;
1227 ip4_nbr_stats_ctx_t ctx = {
1233 pool_foreach (si, im->sw_interfaces,
1236 * update the interface we are now concerned with
1238 ctx.sw_if_index = si->sw_if_index;
1241 * we are about to walk another interface, so we shouldn't have any pending
1244 ASSERT(ctx.counters == NULL);
1247 * visit each neighbour adjacency on the interface and collect
1248 * its current stats.
1249 * Because we hold the lock the walk is synchronous, so safe to routing
1250 * updates. It's limited in work by the number of adjacenies on an
1251 * interface, which is typically not huge.
1253 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1254 adj_nbr_walk (si->sw_if_index,
1261 * if this interface has some adjacencies with counters then ship them,
1262 * else continue to the next interface.
1264 if (NULL != ctx.counters)
1266 ip4_nbr_ship(sm, &ctx);
1273 * @brief The context passed when collecting adjacency counters
1275 typedef struct ip6_nbr_stats_ctx_t_
1278 * The SW IF index all these adjs belong to
1283 * A vector of ip6 nbr counters
1285 vl_api_ip6_nbr_counter_t *counters;
1286 } ip6_nbr_stats_ctx_t;
1288 static adj_walk_rc_t
1289 ip6_nbr_stats_cb (adj_index_t ai,
1292 vl_api_ip6_nbr_counter_t *vl_counter;
1293 vlib_counter_t adj_counter;
1294 ip6_nbr_stats_ctx_t *ctx;
1295 ip_adjacency_t *adj;
1298 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
1300 if (0 != adj_counter.packets)
1302 vec_add2(ctx->counters, vl_counter, 1);
1305 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1306 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1307 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1308 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1309 vl_counter->link_type = adj->ia_link;
1311 return (ADJ_WALK_RC_CONTINUE);
1314 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * Drain the walk context's counter vector to the API input queue in
 * batches of up to IP6_FIB_COUNTER_BATCH_SIZE, building one
 * VNET_IP6_NBR_COUNTERS message per batch.  Pauses (and drops the data
 * structure lock) when the shared-memory queue is full.
 */
1317 ip6_nbr_ship (stats_main_t * sm,
1318 ip6_nbr_stats_ctx_t *ctx)
1320 api_main_t *am = sm->api_main;
1321 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1322 svm_queue_t *q = shmem_hdr->vl_input_queue;
1323 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1327 * If the walk context has counters, which may be left over from the last
1328 * suspend, then we continue from there.
1330 while (0 != vec_len(ctx->counters))
1332 u32 n_items = MIN (vec_len (ctx->counters),
1333 IP6_FIB_COUNTER_BATCH_SIZE);
1336 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1338 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1341 (vl_api_ip6_nbr_counter_t)));
1342 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1343 mp->count = ntohl (n_items);
1344 mp->sw_if_index = ntohl (ctx->sw_if_index);
1349 * copy the counters from the back of the context, then we can easily
1350 * 'erase' them by resetting the vector length.
1351 * The order we push the stats to the caller is not important.
1354 &ctx->counters[vec_len (ctx->counters) - n_items],
1355 n_items * sizeof (*ctx->counters));
/* Shrink in place: the shipped tail entries are logically erased. */
1357 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Sample queue fullness before sending so we can back off afterwards. */
1363 pause = svm_queue_is_full (q);
1365 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1366 svm_queue_unlock (q);
1370 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1371 STATS_RELEASE_DELAY_NS);
/*
 * For every software interface, walk its ip6 neighbour adjacencies under
 * the data-structure lock (ip6_nbr_stats_cb collects into ctx.counters),
 * then ship any collected counters to registered clients.
 */
1376 do_ip6_nbr_counters (stats_main_t * sm)
1378 vnet_main_t *vnm = vnet_get_main ();
1379 vnet_interface_main_t *im = &vnm->interface_main;
1380 vnet_sw_interface_t *si;
1382 ip6_nbr_stats_ctx_t ctx = {
1388 pool_foreach (si, im->sw_interfaces,
1391 * update the interface we are now concerned with
1393 ctx.sw_if_index = si->sw_if_index;
1396 * we are about to walk another interface, so we shouldn't have any pending
1399 ASSERT(ctx.counters == NULL);
1402 * visit each neighbour adjacency on the interface and collect
1403 * its current stats.
1404 * Because we hold the lock the walk is synchronous, so safe to routing
1405 * updates. It's limited in work by the number of adjacenies on an
1406 * interface, which is typically not huge.
1408 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1409 adj_nbr_walk (si->sw_if_index,
1416 * if this interface has some adjacencies with counters then ship them,
1417 * else continue to the next interface.
1419 if (NULL != ctx.counters)
1421 ip6_nbr_ship(sm, &ctx);
/*
 * Collect per-route ip4 FIB counters and push them to API clients in
 * IP4_FIB_COUNTER_BATCH_SIZE-sized VNET_IP4_FIB_COUNTERS messages.
 * Snapshot of routes is taken under the data-structure lock; the walk
 * cooperatively yields (via release_hint / ip46_fib_stats_delay) and
 * restarts from start_at_fib_index after a bail-out.
 */
1428 do_ip4_fib_counters (stats_main_t * sm)
1430 ip4_main_t *im4 = &ip4_main;
1431 api_main_t *am = sm->api_main;
1432 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1433 svm_queue_t *q = shmem_hdr->vl_input_queue;
1437 do_ip46_fibs_t *do_fibs;
1438 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1439 u32 items_this_message;
1440 vl_api_ip4_fib_counter_t *ctrp = 0;
1441 u32 start_at_fib_index = 0;
1444 do_fibs = &sm->do_ip46_fibs;
/* Snapshot the set of FIBs so the pool can change while we suspend. */
1447 vec_reset_length (do_fibs->fibs);
1449 pool_foreach (fib, im4->fibs,
1450 ({vec_add1(do_fibs->fibs,fib);}));
1454 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1456 fib = do_fibs->fibs[j];
1457 /* We may have bailed out due to control-plane activity */
1458 while ((fib - im4->fibs) < start_at_fib_index)
1461 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1465 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1466 mp = vl_msg_api_alloc_as_if_client
1468 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1469 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1471 mp->vrf_id = ntohl (fib->ft_table_id);
1472 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1476 /* happens if the last FIB was empty... */
1477 ASSERT (mp->count == 0);
1478 mp->vrf_id = ntohl (fib->ft_table_id);
/* Gather this FIB's routes into do_fibs->ip4routes under the lock,
 * one dst-address-length bucket at a time. */
1481 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1483 vec_reset_length (do_fibs->ip4routes);
1484 vec_reset_length (do_fibs->results);
1486 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1488 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1492 vec_reset_length (do_fibs->pvec);
1494 x.address_length = i;
1496 hash_foreach_pair (p, hash, (
1498 vec_add1 (do_fibs->pvec, p);}
1500 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1502 p = do_fibs->pvec[k];
1503 x.address.data_u32 = p->key;
1504 x.index = p->value[0];
1506 vec_add1 (do_fibs->ip4routes, x);
/* Main thread wants the lock: remember where we were and yield. */
1507 if (sm->data_structure_lock->release_hint)
1509 start_at_fib_index = fib - im4->fibs;
1511 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1512 STATS_RELEASE_DELAY_NS);
1514 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
/* Resolve each snapshotted route to its load-balance counter and
 * batch non-zero entries into the outgoing message. */
1520 vec_foreach (r, do_fibs->ip4routes)
1523 const dpo_id_t *dpo_id;
1526 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1527 index = (u32) dpo_id->dpoi_index;
1529 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1532 * If it has actually
1533 * seen at least one packet, send it.
1538 /* already in net byte order */
1539 ctrp->address = r->address.as_u32;
1540 ctrp->address_length = r->address_length;
1541 ctrp->packets = clib_host_to_net_u64 (c.packets);
1542 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1546 if (mp->count == items_this_message)
1548 mp->count = htonl (items_this_message);
1550 * If the main thread's input queue is stuffed,
1551 * drop the data structure lock (which the main thread
1552 * may want), and take a pause.
1555 if (svm_queue_is_full (q))
1558 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1559 svm_queue_unlock (q);
1561 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1562 STATS_RELEASE_DELAY_NS);
1565 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1566 svm_queue_unlock (q);
/* Start the next batch message for the same FIB. */
1568 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1569 mp = vl_msg_api_alloc_as_if_client
1571 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1572 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1574 mp->vrf_id = ntohl (fib->ft_table_id);
1575 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1577 } /* for each (mp or single) adj */
1578 if (sm->data_structure_lock->release_hint)
1580 start_at_fib_index = fib - im4->fibs;
1582 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1584 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1587 } /* vec_foreach (routes) */
1591 /* Flush any data from this fib */
1594 mp->count = htonl (mp->count);
1595 vl_msg_api_send_shmem (q, (u8 *) & mp);
1600 /* If e.g. the last FIB had no reportable routes, free the buffer */
1602 vl_msg_api_free (mp);
/*
 * mfib_table_walk() callback: record the entry's prefix in the shared
 * do_ip46_fibs scratch vector so the caller can re-look-up each route
 * later (the collection loop may suspend between walk and send).
 */
1606 mfib_table_stats_walk_cb (fib_node_index_t fei, void *ctx)
1608 stats_main_t *sm = ctx;
1609 do_ip46_fibs_t *do_fibs;
1610 mfib_entry_t *entry;
1612 do_fibs = &sm->do_ip46_fibs;
1613 entry = mfib_entry_get (fei);
1615 vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
/*
 * Collect ip4 multicast-FIB (group,source) counters and push them to API
 * clients in IP4_MFIB_COUNTER_BATCH_SIZE-sized VNET_IP4_MFIB_COUNTERS
 * messages.  Prefixes are snapshotted under the lock, then re-looked-up
 * at send time since the loop may suspend mid-collection.
 */
1621 do_ip4_mfib_counters (stats_main_t * sm)
1623 ip4_main_t *im4 = &ip4_main;
1624 api_main_t *am = sm->api_main;
1625 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1626 svm_queue_t *q = shmem_hdr->vl_input_queue;
1629 do_ip46_fibs_t *do_fibs;
1630 vl_api_vnet_ip4_mfib_counters_t *mp = 0;
1631 u32 items_this_message;
1632 vl_api_ip4_mfib_counter_t *ctrp = 0;
1633 u32 start_at_mfib_index = 0;
1636 do_fibs = &sm->do_ip46_fibs;
1638 vec_reset_length (do_fibs->mfibs);
1640 pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1643 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1645 mfib = do_fibs->mfibs[j];
1646 /* We may have bailed out due to control-plane activity */
1647 while ((mfib - im4->mfibs) < start_at_mfib_index)
1652 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1653 mp = vl_msg_api_alloc_as_if_client
1655 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1656 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1658 mp->vrf_id = ntohl (mfib->mft_table_id);
1659 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1663 /* happens if the last MFIB was empty... */
1664 ASSERT (mp->count == 0);
1665 mp->vrf_id = ntohl (mfib->mft_table_id);
1668 vec_reset_length (do_fibs->mroutes);
1671 * walk the table with table updates blocked
1673 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1675 mfib_table_walk (mfib->mft_index,
1676 FIB_PROTOCOL_IP4, mfib_table_stats_walk_cb, sm);
1679 vec_foreach (pfx, do_fibs->mroutes)
1681 const dpo_id_t *dpo_id;
1682 fib_node_index_t mfei;
1687 * re-lookup the entry, since we suspend during the collection
1689 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* Route may have been deleted while we were suspended. */
1691 if (FIB_NODE_INDEX_INVALID == mfei)
1694 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1695 index = (u32) dpo_id->dpoi_index;
1697 vlib_get_combined_counter (&replicate_main.repm_counters,
1698 dpo_id->dpoi_index, &c);
1700 * If it has seen at least one packet, send it.
1704 /* already in net byte order */
1705 memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1706 memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1707 ctrp->group_length = pfx->fp_len;
1708 ctrp->packets = clib_host_to_net_u64 (c.packets);
1709 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1713 if (mp->count == items_this_message)
1715 mp->count = htonl (items_this_message);
1717 * If the main thread's input queue is stuffed,
1718 * drop the data structure lock (which the main thread
1719 * may want), and take a pause.
1723 while (svm_queue_is_full (q))
1725 svm_queue_unlock (q);
1726 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1727 STATS_RELEASE_DELAY_NS);
1730 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1731 svm_queue_unlock (q);
/* Start the next batch message for the same MFIB. */
1733 items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1734 mp = vl_msg_api_alloc_as_if_client
1736 items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1737 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1739 mp->vrf_id = ntohl (mfib->mft_table_id);
1740 ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1745 /* Flush any data from this mfib */
1748 mp->count = htonl (mp->count);
1749 vl_msg_api_send_shmem (q, (u8 *) & mp);
1754 /* If e.g. the last FIB had no reportable routes, free the buffer */
1756 vl_msg_api_free (mp);
/*
 * ip6 twin of do_ip4_mfib_counters(): collect (group,source) replicate
 * counters per ip6 MFIB and batch them into VNET_IP6_MFIB_COUNTERS
 * messages.  Differs only in protocol, address width (16 bytes) and
 * message/batch-size constants.
 */
1760 do_ip6_mfib_counters (stats_main_t * sm)
1762 ip6_main_t *im6 = &ip6_main;
1763 api_main_t *am = sm->api_main;
1764 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1765 svm_queue_t *q = shmem_hdr->vl_input_queue;
1768 do_ip46_fibs_t *do_fibs;
1769 vl_api_vnet_ip6_mfib_counters_t *mp = 0;
1770 u32 items_this_message;
1771 vl_api_ip6_mfib_counter_t *ctrp = 0;
1772 u32 start_at_mfib_index = 0;
1775 do_fibs = &sm->do_ip46_fibs;
1777 vec_reset_length (do_fibs->mfibs);
1779 pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1782 for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1784 mfib = do_fibs->mfibs[j];
1785 /* We may have bailed out due to control-plane activity */
1786 while ((mfib - im6->mfibs) < start_at_mfib_index)
1791 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1792 mp = vl_msg_api_alloc_as_if_client
1794 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1795 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1797 mp->vrf_id = ntohl (mfib->mft_table_id);
1798 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1802 /* happens if the last MFIB was empty... */
1803 ASSERT (mp->count == 0);
1804 mp->vrf_id = ntohl (mfib->mft_table_id);
1807 vec_reset_length (do_fibs->mroutes);
1810 * walk the table with table updates blocked
1812 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1814 mfib_table_walk (mfib->mft_index,
1815 FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
1818 vec_foreach (pfx, do_fibs->mroutes)
1820 const dpo_id_t *dpo_id;
1821 fib_node_index_t mfei;
1826 * re-lookup the entry, since we suspend during the collection
1828 mfei = mfib_table_lookup (mfib->mft_index, pfx);
/* Route may have been deleted while we were suspended. */
1830 if (FIB_NODE_INDEX_INVALID == mfei)
1833 dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1834 index = (u32) dpo_id->dpoi_index;
1836 vlib_get_combined_counter (&replicate_main.repm_counters,
1837 dpo_id->dpoi_index, &c);
1839 * If it has seen at least one packet, send it.
1843 /* already in net byte order */
1844 memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1845 memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1846 ctrp->group_length = pfx->fp_len;
1847 ctrp->packets = clib_host_to_net_u64 (c.packets);
1848 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1852 if (mp->count == items_this_message)
1854 mp->count = htonl (items_this_message);
1856 * If the main thread's input queue is stuffed,
1857 * drop the data structure lock (which the main thread
1858 * may want), and take a pause.
1862 while (svm_queue_is_full (q))
1864 svm_queue_unlock (q);
1865 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1866 STATS_RELEASE_DELAY_NS);
1869 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1870 svm_queue_unlock (q);
/* Start the next batch message for the same MFIB. */
1872 items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1873 mp = vl_msg_api_alloc_as_if_client
1875 items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1876 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1878 mp->vrf_id = ntohl (mfib->mft_table_id);
1879 ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1884 /* Flush any data from this mfib */
1887 mp->count = htonl (mp->count);
1888 vl_msg_api_send_shmem (q, (u8 *) & mp);
1893 /* If e.g. the last FIB had no reportable routes, free the buffer */
1895 vl_msg_api_free (mp);
/* Argument block for the bihash walk below: destination route vector plus
 * the FIB index filter (remaining fields elided in this view). */
1901 ip6_route_t **routep;
1903 } add_routes_in_fib_arg_t;
/*
 * Bihash key/value callback: if the entry belongs to the FIB we are
 * collecting (fib index encoded in the high half of key[2]), append an
 * ip6_route_t for it.  If the main thread has signalled for the lock,
 * longjmp back out to the collector so it can yield and restart.
 */
1906 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1908 add_routes_in_fib_arg_t *ap = arg;
1909 stats_main_t *sm = ap->sm;
1911 if (sm->data_structure_lock->release_hint)
1912 clib_longjmp (&sm->jmp_buf, 1);
1914 if (kvp->key[2] >> 32 == ap->fib_index)
1916 ip6_address_t *addr;
/* The first 16 bytes of the key hold the ip6 address; prefix length
 * lives in the low byte of key[2]. */
1918 addr = (ip6_address_t *) kvp;
1919 vec_add2 (*ap->routep, r, 1);
1920 r->address = addr[0];
1921 r->address_length = kvp->key[2] & 0xFF;
1922 r->index = kvp->value;
/*
 * Collect per-route ip6 FIB counters and push them to API clients in
 * IP6_FIB_COUNTER_BATCH_SIZE-sized VNET_IP6_FIB_COUNTERS messages.
 * Routes are harvested from the forwarding bihash via add_routes_in_fib;
 * a setjmp/longjmp pair lets that walk bail out mid-table when the main
 * thread wants the data-structure lock, after which this FIB restarts.
 */
1927 do_ip6_fib_counters (stats_main_t * sm)
1929 ip6_main_t *im6 = &ip6_main;
1930 api_main_t *am = sm->api_main;
1931 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1932 svm_queue_t *q = shmem_hdr->vl_input_queue;
1935 do_ip46_fibs_t *do_fibs;
1936 vl_api_vnet_ip6_fib_counters_t *mp = 0;
1937 u32 items_this_message;
1938 vl_api_ip6_fib_counter_t *ctrp = 0;
1939 u32 start_at_fib_index = 0;
1940 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1941 add_routes_in_fib_arg_t _a, *a = &_a;
1944 do_fibs = &sm->do_ip46_fibs;
/* Snapshot the set of FIBs so the pool can change while we suspend. */
1946 vec_reset_length (do_fibs->fibs);
1948 pool_foreach (fib, im6->fibs,
1949 ({vec_add1(do_fibs->fibs,fib);}));
1953 for (i = 0; i < vec_len (do_fibs->fibs); i++)
1955 fib = do_fibs->fibs[i];
1956 /* We may have bailed out due to control-plane activity */
1957 while ((fib - im6->fibs) < start_at_fib_index)
1962 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1963 mp = vl_msg_api_alloc_as_if_client
1965 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1966 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1968 mp->vrf_id = ntohl (fib->ft_table_id);
1969 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1972 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1974 vec_reset_length (do_fibs->ip6routes);
1975 vec_reset_length (do_fibs->results);
1977 a->fib_index = fib - im6->fibs;
1978 a->routep = &do_fibs->ip6routes;
/* Normal path: walk the bihash.  The callback longjmps back here with a
 * non-zero value when it must yield, and this FIB is retried. */
1981 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1983 start_at_fib_index = fib - im6->fibs;
1984 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
1989 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1990 STATS_RELEASE_DELAY_NS);
1992 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Resolve each snapshotted route to its load-balance counter and
 * batch non-zero entries into the outgoing message. */
1996 vec_foreach (r, do_fibs->ip6routes)
2000 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
2003 * If it has actually
2004 * seen at least one packet, send it.
2008 /* already in net byte order */
2009 ctrp->address[0] = r->address.as_u64[0];
2010 ctrp->address[1] = r->address.as_u64[1];
2011 ctrp->address_length = (u8) r->address_length;
2012 ctrp->packets = clib_host_to_net_u64 (c.packets);
2013 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2017 if (mp->count == items_this_message)
2019 mp->count = htonl (items_this_message);
2021 * If the main thread's input queue is stuffed,
2022 * drop the data structure lock (which the main thread
2023 * may want), and take a pause.
2026 if (svm_queue_is_full (q))
2029 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2030 svm_queue_unlock (q);
2032 ip46_fib_stats_delay (sm, 0 /* sec */ ,
2033 STATS_RELEASE_DELAY_NS);
2036 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2037 svm_queue_unlock (q);
/* Start the next batch message for the same FIB. */
2039 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2040 mp = vl_msg_api_alloc_as_if_client
2042 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2043 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2045 mp->vrf_id = ntohl (fib->ft_table_id);
2046 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2050 if (sm->data_structure_lock->release_hint)
2052 start_at_fib_index = fib - im6->fibs;
2054 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2056 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2059 } /* vec_foreach (routes) */
2063 /* Flush any data from this fib */
2066 mp->count = htonl (mp->count);
2067 vl_msg_api_send_shmem (q, (u8 *) & mp);
2072 /* If e.g. the last FIB had no reportable routes, free the buffer */
2074 vl_msg_api_free (mp);
/*
 * Stats-collector thread entry point: block all signals, name the thread,
 * switch to its private heap, then loop forever polling every 10 seconds
 * and running each collector that has at least one registered client.
 */
2078 stats_thread_fn (void *arg)
2080 stats_main_t *sm = &stats_main;
2081 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
2082 vlib_thread_main_t *tm = vlib_get_thread_main ();
2084 /* stats thread wants no signals. */
2088 pthread_sigmask (SIG_SETMASK, &s, 0);
2091 if (vec_len (tm->thread_prefix))
2092 vlib_set_thread_name ((char *)
2093 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2095 clib_mem_set_heap (w->thread_mheap);
2099 /* 10 second poll interval */
2100 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
/* Master switch: skip all collection when no poller is enabled. */
2102 if (!(sm->enable_poller))
2107 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2108 do_combined_per_interface_counters (sm);
2111 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2112 do_simple_per_interface_counters (sm);
2114 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2115 do_ip4_fib_counters (sm);
2117 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2118 do_ip6_fib_counters (sm);
2120 if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2121 do_ip4_mfib_counters (sm);
2123 if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2124 do_ip6_mfib_counters (sm);
2126 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2127 do_ip4_nbr_counters (sm);
2129 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2130 do_ip6_nbr_counters (sm);
/*
 * Fan a simple-interface-counters message out to every registered client.
 * Pattern used by all the fan-out handlers below: each ready client after
 * the first receives a fresh copy; the original message is sent to the
 * last ready client, or freed if no client could take it.  Clients whose
 * input queue has vanished are unregistered.
 */
2135 vl_api_vnet_interface_simple_counters_t_handler
2136 (vl_api_vnet_interface_simple_counters_t * mp)
2138 vpe_client_registration_t *clients, client;
2139 stats_main_t *sm = &stats_main;
2140 svm_queue_t *q, *q_prev = NULL;
2141 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
/* Variable-length message: header plus count u64 counter slots. */
2145 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2148 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2149 ~0 /*flag for all */ );
2151 for (i = 0; i < vec_len (clients); i++)
2153 client = clients[i];
2154 q = vl_api_client_index_to_input_queue (client.client_index);
/* Previous client's queue has room: hand it the current buffer and
 * continue with a fresh copy. */
2157 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2159 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2160 clib_memcpy (mp_copy, mp, mp_size);
2161 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
/* Client disappeared: drop its registration. */
2169 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2170 client.client_index);
2176 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2179 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2181 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2185 vl_msg_api_free (mp);
/*
 * Fan an ip4 FIB counters message out to every client registered for
 * IDX_IP4_FIB_COUNTERS (same copy-per-ready-client pattern as the simple
 * counters handler above).  Dead clients are unregistered, and the poller
 * enable flag is refreshed from the remaining registrations.
 */
2190 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
2192 stats_main_t *sm = &stats_main;
2193 svm_queue_t *q, *q_prev = NULL;
2194 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
2196 vpe_client_registration_t *clients, client;
2199 mp_size = sizeof (*mp_copy) +
2200 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2203 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2205 for (i = 0; i < vec_len (clients); i++)
2207 client = clients[i];
2208 q = vl_api_client_index_to_input_queue (client.client_index);
2211 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2213 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2214 clib_memcpy (mp_copy, mp, mp_size);
2215 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2222 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2223 ~0, client.client_index);
2228 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2230 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2234 vl_msg_api_free (mp);
/*
 * Fan an ip4 neighbour counters message out to clients registered for
 * IDX_IP4_NBR_COUNTERS; same fan-out pattern as the handlers above.
 */
2239 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
2241 stats_main_t *sm = &stats_main;
2242 svm_queue_t *q, *q_prev = NULL;
2243 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
2245 vpe_client_registration_t *clients, client;
2248 mp_size = sizeof (*mp_copy) +
2249 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2252 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2254 for (i = 0; i < vec_len (clients); i++)
2256 client = clients[i];
2257 q = vl_api_client_index_to_input_queue (client.client_index);
2260 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2262 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2263 clib_memcpy (mp_copy, mp, mp_size);
2264 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2271 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2272 ~0, client.client_index);
2278 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2280 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2284 vl_msg_api_free (mp);
/*
 * Fan an ip6 FIB counters message out to clients registered for
 * IDX_IP6_FIB_COUNTERS; same fan-out pattern as the handlers above.
 */
2289 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
2291 stats_main_t *sm = &stats_main;
2292 svm_queue_t *q, *q_prev = NULL;
2293 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
2295 vpe_client_registration_t *clients, client;
2298 mp_size = sizeof (*mp_copy) +
2299 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2302 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2304 for (i = 0; i < vec_len (clients); i++)
2306 client = clients[i];
2307 q = vl_api_client_index_to_input_queue (client.client_index);
2310 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2312 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2313 clib_memcpy (mp_copy, mp, mp_size);
2314 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2321 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2322 ~0, client.client_index);
2327 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2329 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2333 vl_msg_api_free (mp);
/*
 * Fan an ip6 neighbour counters message out to clients registered for
 * IDX_IP6_NBR_COUNTERS; same fan-out pattern as the handlers above.
 */
2338 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2340 stats_main_t *sm = &stats_main;
2341 svm_queue_t *q, *q_prev = NULL;
2342 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2344 vpe_client_registration_t *clients, client;
2347 mp_size = sizeof (*mp_copy) +
2348 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2351 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2353 for (i = 0; i < vec_len (clients); i++)
2355 client = clients[i];
2356 q = vl_api_client_index_to_input_queue (client.client_index);
2359 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2361 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2362 clib_memcpy (mp_copy, mp, mp_size);
2363 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2370 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2371 ~0, client.client_index);
2376 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2378 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2382 vl_msg_api_free (mp);
/*
 * Legacy "want everything" registration: (un)register the client for all
 * six counter streams at once (item ~0 = all items), then reply.
 */
2387 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2389 stats_main_t *sm = &stats_main;
2390 vpe_client_registration_t rp;
2391 vl_api_want_stats_reply_t *rmp;
2397 item = ~0; //"ALL THE THINGS IN THE THINGS
2398 rp.client_index = mp->client_index;
2399 rp.client_pid = mp->pid;
2401 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2402 item, mp->enable_disable);
2404 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2405 item, mp->enable_disable);
2407 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2408 item, mp->enable_disable);
2410 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2411 item, mp->enable_disable);
2413 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2414 item, mp->enable_disable);
2416 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2417 item, mp->enable_disable);
2420 q = vl_api_client_index_to_input_queue (mp->client_index);
2425 rmp = vl_msg_api_alloc (sizeof (*rmp));
2426 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2427 rmp->context = mp->context;
2428 rmp->retval = retval;
2430 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * (Un)register a client for simple interface counters on all interfaces
 * (swif ~0), then reply with WANT_INTERFACE_SIMPLE_STATS_REPLY.
 */
2434 vl_api_want_interface_simple_stats_t_handler
2435 (vl_api_want_interface_simple_stats_t * mp)
2437 stats_main_t *sm = &stats_main;
2438 vpe_client_registration_t rp;
2439 vl_api_want_interface_simple_stats_reply_t *rmp;
2445 swif = ~0; //Using same mechanism as _per_interface_
2446 rp.client_index = mp->client_index;
2447 rp.client_pid = mp->pid;
2449 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2450 mp->enable_disable);
2453 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Client queue gone: remove any registration we just made. */
2458 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2463 rmp = vl_msg_api_alloc (sizeof (*rmp));
2464 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2465 rmp->context = mp->context;
2466 rmp->retval = retval;
2468 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * (Un)register a client for ip4 FIB counters on all FIBs (fib ~0), then
 * reply with WANT_IP4_FIB_STATS_REPLY.
 */
2473 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2475 stats_main_t *sm = &stats_main;
2476 vpe_client_registration_t rp;
2477 vl_api_want_ip4_fib_stats_reply_t *rmp;
2483 fib = ~0; //Using same mechanism as _per_interface_
2484 rp.client_index = mp->client_index;
2485 rp.client_pid = mp->pid;
2487 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2488 mp->enable_disable);
2491 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Client queue gone: unwind the registration and refresh the poller flag. */
2495 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2496 fib, mp->client_index);
2500 rmp = vl_msg_api_alloc (sizeof (*rmp));
2501 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2502 rmp->context = mp->context;
2503 rmp->retval = retval;
2505 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * (Un)register a client for ip4 MFIB counters on all MFIBs (mfib ~0),
 * then reply with WANT_IP4_MFIB_STATS_REPLY.
 */
2509 vl_api_want_ip4_mfib_stats_t_handler (vl_api_want_ip4_mfib_stats_t * mp)
2511 stats_main_t *sm = &stats_main;
2512 vpe_client_registration_t rp;
2513 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2519 mfib = ~0; //Using same mechanism as _per_interface_
2520 rp.client_index = mp->client_index;
2521 rp.client_pid = mp->pid;
2523 handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2524 mp->enable_disable);
2527 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Client queue gone: unwind the registration and refresh the poller flag. */
2531 sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2532 mfib, mp->client_index);
2536 rmp = vl_msg_api_alloc (sizeof (*rmp));
2537 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2538 rmp->context = mp->context;
2539 rmp->retval = retval;
2541 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * (Un)register a client for ip6 FIB counters on all FIBs (fib ~0), then
 * reply with WANT_IP6_FIB_STATS_REPLY.
 */
2545 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2547 stats_main_t *sm = &stats_main;
2548 vpe_client_registration_t rp;
/* NOTE(review): declares the ip4 reply type but sends the IP6 reply msg id
 * below — presumably the two reply structs are layout-identical; confirm
 * and switch to vl_api_want_ip6_fib_stats_reply_t. */
2549 vl_api_want_ip4_fib_stats_reply_t *rmp;
2555 fib = ~0; //Using same mechanism as _per_interface_
2556 rp.client_index = mp->client_index;
2557 rp.client_pid = mp->pid;
2559 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2560 mp->enable_disable);
2563 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Client queue gone: unwind the registration and refresh the poller flag. */
2567 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2568 fib, mp->client_index);
2572 rmp = vl_msg_api_alloc (sizeof (*rmp));
2573 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2574 rmp->context = mp->context;
2575 rmp->retval = retval;
2577 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * (Un)register a client for ip6 MFIB counters on all MFIBs (mfib ~0),
 * then reply with WANT_IP6_MFIB_STATS_REPLY.
 */
2581 vl_api_want_ip6_mfib_stats_t_handler (vl_api_want_ip6_mfib_stats_t * mp)
2583 stats_main_t *sm = &stats_main;
2584 vpe_client_registration_t rp;
/* NOTE(review): declares the ip4 reply type but sends the IP6 reply msg id
 * below — presumably layout-identical; confirm and switch to
 * vl_api_want_ip6_mfib_stats_reply_t. */
2585 vl_api_want_ip4_mfib_stats_reply_t *rmp;
2591 mfib = ~0; //Using same mechanism as _per_interface_
2592 rp.client_index = mp->client_index;
2593 rp.client_pid = mp->pid;
2595 handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2596 mp->enable_disable);
2599 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Client queue gone: unwind the registration and refresh the poller flag. */
2603 sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2604 mfib, mp->client_index);
2608 rmp = vl_msg_api_alloc (sizeof (*rmp));
2609 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2610 rmp->context = mp->context;
2611 rmp->retval = retval;
2613 vl_msg_api_send_shmem (q, (u8 *) & rmp);
2616 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/* Deliberate no-op stub (see FIXME above). */
2618 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
/* Deliberate no-op stub (see FIXME above). */
2623 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/*
 * Reply to a summary-stats request: sum every combined interface counter
 * (rx and tx, packets and bytes) under the interface counter lock and
 * return the totals plus the last vector rate, all in network byte order.
 */
2628 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2630 stats_main_t *sm = &stats_main;
2631 vnet_interface_main_t *im = sm->interface_main;
2632 vl_api_vnet_get_summary_stats_reply_t *rmp;
2633 vlib_combined_counter_main_t *cm;
2636 u64 total_pkts[VLIB_N_RX_TX];
2637 u64 total_bytes[VLIB_N_RX_TX];
2639 svm_queue_t *q = vl_api_client_index_to_input_queue (mp->client_index);
2646 rmp = vl_msg_api_alloc (sizeof (*rmp));
2647 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2648 rmp->context = mp->context;
2651 memset (total_pkts, 0, sizeof (total_pkts));
2652 memset (total_bytes, 0, sizeof (total_bytes));
2654 vnet_interface_counter_lock (im);
2656 vec_foreach (cm, im->combined_sw_if_counters)
/* 'which' selects the RX or TX accumulator for this counter main. */
2658 which = cm - im->combined_sw_if_counters;
2660 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2662 vlib_get_combined_counter (cm, i, &v);
2663 total_pkts[which] += v.packets;
2664 total_bytes[which] += v.bytes;
2667 vnet_interface_counter_unlock (im);
2669 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2670 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2671 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2672 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
2674 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2676 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * Client-disconnect hook.  The per-client registration cleanup is
 * intentionally commented out — presumably superseded by
 * clear_client_for_stat() in the fan-out handlers; confirm before
 * resurrecting this code.
 */
2680 stats_memclnt_delete_callback (u32 client_index)
2682 vpe_client_stats_registration_t *rp;
2683 stats_main_t *sm = &stats_main;
2687 /* p = hash_get (sm->stats_registration_hash, client_index); */
2690 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2691 /* pool_put (sm->stats_registrations, rp); */
2692 /* hash_unset (sm->stats_registration_hash, client_index); */
/* These counter messages are produced locally and handled above, so the
 * generated endian/print hooks are stubbed out — presumably to avoid
 * double byte-swapping of already-net-order payloads; confirm against the
 * generated API code. */
2698 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
2699 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
2700 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
2701 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
2702 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
2703 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
2704 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
2705 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
2706 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
2707 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
2708 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
2709 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/*
 * One-time init: wire up main pointers, allocate and zero the shared
 * data-structure lock, register API message handlers, mark the counter
 * messages non-freeable (they are forwarded, not consumed), set up the
 * message-id table, and size the per-stat registration pools/hashes.
 */
2711 static clib_error_t *
2712 stats_init (vlib_main_t * vm)
2714 stats_main_t *sm = &stats_main;
2715 api_main_t *am = &api_main;
2716 void *vlib_worker_thread_bootstrap_fn (void *arg);
2719 sm->vnet_main = vnet_get_main ();
2720 sm->interface_main = &vnet_get_main ()->interface_main;
2722 sm->stats_poll_interval_in_seconds = 10;
2723 sm->data_structure_lock =
2724 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
2725 CLIB_CACHE_LINE_BYTES);
2726 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
2729 vl_msg_api_set_handlers(VL_API_##N, #n, \
2730 vl_api_##n##_t_handler, \
2732 vl_api_##n##_t_endian, \
2733 vl_api_##n##_t_print, \
2734 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
2738 /* tell the msg infra not to free these messages... */
2739 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
2740 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
2741 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
2742 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
2743 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
2744 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
2747 * Set up the (msg_name, crc, message-id) table
2749 setup_message_id_table (am);
2751 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
2752 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
2753 #define stats_reg(n) \
2754 sm->stats_registrations[IDX_##n] = 0; \
2755 sm->stats_registration_hash[IDX_##n] = 0;
2756 #include <vpp/stats/stats.reg>
2762 VLIB_INIT_FUNCTION (stats_init);
2765 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
2767 .function = stats_thread_fn,
2770 .no_data_structure_clone = 1,
2776 * fd.io coding-style-patch-verification: ON
2779 * eval: (c-set-style "gnu")