2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/dpo/load_balance.h>
23 stats_main_t stats_main;
25 #include <vnet/ip/ip.h>
27 #include <vpp/api/vpe_msg_enum.h>
30 #define f64_print(a,b)
32 #define vl_typedefs /* define message structures */
33 #include <vpp/api/vpe_all_api_h.h>
36 #define vl_endianfun /* define message structures */
37 #include <vpp/api/vpe_all_api_h.h>
40 /* instantiate all the print functions we know about */
41 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
43 #include <vpp/api/vpe_all_api_h.h>
/*
 * NOTE(review): corrupted listing — the leading integer on each line is an
 * embedded original-file line number; strip before compiling.
 *
 * X-macro enumerating every stats API message this module handles; each
 * _(MESSAGE_ENUM, handler_suffix) pair is expanded elsewhere into handler
 * registration boilerplate.
 */
46 #define foreach_stats_msg \
47 _(WANT_STATS, want_stats) \
48 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
49 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
50 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
51 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
52 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
53 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
54 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
55 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
56 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
57 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
58 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
59 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
60 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
61 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
62 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
65 #define vl_msg_name_crc_list
66 #include <vpp/stats/stats.api.h>
67 #undef vl_msg_name_crc_list
/*
 * Registers the name+CRC string of every stats API message with the API
 * message-name table (used for cross-version message-ID mapping).
 * NOTE(review): fragment from a corrupted listing — the return-type line,
 * braces, and the "#define _(id,n,crc)" wrapper that drives the
 * foreach_vl_msg_name_crc_stats expansion are missing here.
 */
70 setup_message_id_table (api_main_t * am)
73 vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
74 foreach_vl_msg_name_crc_stats;
/*
 * Per-message batching limits: counters are shipped in fixed-size chunks so
 * a single stats message never exceeds one 1024-byte shared-memory ring slot.
 */
78 /* These constants ensure msg sizes <= 1024, aka ring allocation */
79 #define SIMPLE_COUNTER_BATCH_SIZE 126
80 #define COMBINED_COUNTER_BATCH_SIZE 63
81 #define IP4_FIB_COUNTER_BATCH_SIZE 48
82 #define IP6_FIB_COUNTER_BATCH_SIZE 30
/* 5 ms (in ns) back-off used by ip46_fib_stats_delay() when a client
 * shared-memory queue is full. */
85 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/*
 * format() helper: renders a vl_api_vnet_interface_combined_counters_t
 * message as a "<if-name>.<counter>.packets" / ".bytes" line pair per
 * interface, starting at first_sw_if_index.  Counter values sit unaligned
 * in network byte order in mp->data, hence the clib_mem_unaligned +
 * clib_net_to_host_u64 pairs.
 * NOTE(review): fragment of a corrupted listing — braces, the "rx"/"tx"
 * counter_name assignments in the case bodies, the vp/sw_if_index
 * increments and the final return are missing.
 */
89 format_vnet_interface_combined_counters (u8 * s, va_list * args)
91 stats_main_t *sm = &stats_main;
92 vl_api_vnet_interface_combined_counters_t *mp =
93 va_arg (*args, vl_api_vnet_interface_combined_counters_t *);
96 u32 count, sw_if_index;
98 count = ntohl (mp->count);
99 sw_if_index = ntohl (mp->first_sw_if_index);
103 vp = (vlib_counter_t *) mp->data;
105 switch (mp->vnet_counter_type)
107 case VNET_INTERFACE_COUNTER_RX:
110 case VNET_INTERFACE_COUNTER_TX:
114 counter_name = "bogus";
117 for (i = 0; i < count; i++)
119 packets = clib_mem_unaligned (&vp->packets, u64);
120 packets = clib_net_to_host_u64 (packets);
121 bytes = clib_mem_unaligned (&vp->bytes, u64);
122 bytes = clib_net_to_host_u64 (bytes);
124 s = format (s, "%U.%s.packets %lld\n",
125 format_vnet_sw_if_index_name,
126 sm->vnet_main, sw_if_index, counter_name, packets);
127 s = format (s, "%U.%s.bytes %lld\n",
128 format_vnet_sw_if_index_name,
129 sm->vnet_main, sw_if_index, counter_name, bytes);
/*
 * format() helper: renders a vl_api_vnet_interface_simple_counters_t
 * message as one "<if-name>.<counter> <value>" line per interface,
 * starting at first_sw_if_index.  The u64 values in mp->data are
 * unaligned and in network byte order.
 * NOTE(review): fragment of a corrupted listing — braces, the break
 * statements between cases, and the vp/sw_if_index increments plus the
 * final return are missing (without the breaks this switch would
 * fall through, which the original does not do).
 */
136 format_vnet_interface_simple_counters (u8 * s, va_list * args)
138 stats_main_t *sm = &stats_main;
139 vl_api_vnet_interface_simple_counters_t *mp =
140 va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
142 u32 count, sw_if_index;
143 count = ntohl (mp->count);
144 sw_if_index = ntohl (mp->first_sw_if_index);
146 vp = (u64 *) mp->data;
149 switch (mp->vnet_counter_type)
151 case VNET_INTERFACE_COUNTER_DROP:
152 counter_name = "drop";
154 case VNET_INTERFACE_COUNTER_PUNT:
155 counter_name = "punt";
157 case VNET_INTERFACE_COUNTER_IP4:
158 counter_name = "ip4";
160 case VNET_INTERFACE_COUNTER_IP6:
161 counter_name = "ip6";
163 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
164 counter_name = "rx-no-buff";
166 case VNET_INTERFACE_COUNTER_RX_MISS:
167 counter_name = "rx-miss";
169 case VNET_INTERFACE_COUNTER_RX_ERROR:
170 counter_name = "rx-error (fifo-full)";
172 case VNET_INTERFACE_COUNTER_TX_ERROR:
173 counter_name = "tx-error (fifo-full)";
176 counter_name = "bogus";
179 for (i = 0; i < count; i++)
181 v = clib_mem_unaligned (vp, u64);
182 v = clib_net_to_host_u64 (v);
184 s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
185 sm->vnet_main, sw_if_index, counter_name, v);
/*
 * Acquire the global stats data-structure lock.
 * No-op when the lock object has not been created; when the calling thread
 * already owns the lock the re-entry path short-circuits (recursive-style
 * acquisition); otherwise spin on __sync_lock_test_and_set until acquired
 * and record the owning thread.
 * NOTE(review): fragment — the recursion-count / release_hint bookkeeping
 * and the early returns are missing from this corrupted listing.
 */
193 dslock (stats_main_t * sm, int release_hint, int tag)
196 data_structure_lock_t *l = sm->data_structure_lock;
198 if (PREDICT_FALSE (l == 0))
201 thread_index = vlib_get_thread_index ();
202 if (l->lock && l->thread_index == thread_index)
211 while (__sync_lock_test_and_set (&l->lock, 1))
214 l->thread_index = thread_index;
/*
 * Public wrapper: take the stats data-structure lock on behalf of callers
 * outside this file, forwarding the release hint and debug tag to dslock().
 */
219 stats_dslock_with_hint (int hint, int tag)
221 stats_main_t *sm = &stats_main;
222 dslock (sm, hint, tag);
/*
 * Release the global stats data-structure lock.
 * No-op when the lock object is absent; asserts that the calling thread is
 * the current owner.  The memory barrier orders all protected writes
 * before the (missing-in-this-listing) store that clears l->lock.
 * NOTE(review): fragment — the recursion-count decrement and the final
 * "l->lock = 0" are not visible in this corrupted extract.
 */
226 dsunlock (stats_main_t * sm)
229 data_structure_lock_t *l = sm->data_structure_lock;
231 if (PREDICT_FALSE (l == 0))
234 thread_index = vlib_get_thread_index ();
235 ASSERT (l->lock && l->thread_index == thread_index);
241 CLIB_MEMORY_BARRIER ();
/*
 * Public wrapper: release the stats data-structure lock for external
 * callers (hint/tag parameters are unused on the unlock path).
 * NOTE(review): fragment — the dsunlock (sm) call is missing from this
 * corrupted listing.
 */
247 stats_dsunlock (int hint, int tag)
249 stats_main_t *sm = &stats_main;
/*
 * Look up the registration record for one client listening to (reg, item):
 * first probe the per-stat-type item hash, then the registration's own
 * client hash.  Returns the pooled vpe_client_registration_t on a double
 * hit; the miss paths (returning NULL) are on lines dropped from this
 * corrupted listing.
 */
253 static vpe_client_registration_t *
254 get_client_for_stat (u32 reg, u32 item, u32 client_index)
256 stats_main_t *sm = &stats_main;
257 vpe_client_stats_registration_t *registration;
260 /* Is there anything listening for item in that reg */
261 p = hash_get (sm->stats_registration_hash[reg], item);
266 /* If there is, is our client_index one of them */
267 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
268 p = hash_get (registration->client_hash, client_index);
273 return pool_elt_at_index (registration->clients, p[0]);
/*
 * Register a client as a listener for (reg, item).
 * Creates the per-item registration (pool entry + item hash mapping) on
 * first listener, then adds the client to that registration's client pool
 * and client hash.  Returns 1 so the caller can enable the stats poller.
 * NOTE(review): fragment — braces and the already-registered short-circuit
 * path are missing from this corrupted listing.
 */
278 set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t * client)
280 stats_main_t *sm = &stats_main;
281 vpe_client_stats_registration_t *registration;
282 vpe_client_registration_t *cr;
285 /* Is there anything listening for item in that reg */
286 p = hash_get (sm->stats_registration_hash[reg], item);
290 pool_get (sm->stats_registrations[reg], registration);
291 registration->item = item;
292 hash_set (sm->stats_registration_hash[reg], item,
293 registration - sm->stats_registrations[reg]);
297 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
300 p = hash_get (registration->client_hash, client->client_index);
304 pool_get (registration->clients, cr);
305 cr->client_index = client->client_index;
306 cr->client_pid = client->client_pid;
307 hash_set (registration->client_hash, cr->client_index,
308 cr - registration->clients);
311 return 1; //At least one client is doing something ... poll
/*
 * Unregister one client from (reg, item).
 * Removes the client from the registration's hash/pool; if it was the last
 * listener for that item, drops the item registration itself.  The trailing
 * loop totals remaining registrations across all stat types — presumably
 * the (missing) return value tells the caller whether the poller may stop;
 * verify against the upstream file.
 * NOTE(review): fragment — braces, miss-path returns and the final return
 * statement are absent from this corrupted listing.
 */
315 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
317 stats_main_t *sm = &stats_main;
318 vpe_client_stats_registration_t *registration;
319 vpe_client_registration_t *client;
323 /* Clear the client first */
324 /* Is there anything listening for item in that reg */
325 p = hash_get (sm->stats_registration_hash[reg], item);
330 /* If there is, is our client_index one of them */
331 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
332 p = hash_get (registration->client_hash, client_index);
337 client = pool_elt_at_index (registration->clients, p[0]);
338 hash_unset (registration->client_hash, client->client_index);
339 pool_put (registration->clients, client);
341 /* Now check if that was the last client for that item */
342 if (0 == pool_elts (registration->clients))
344 hash_unset (sm->stats_registration_hash[reg], item);
345 pool_put (sm->stats_registrations[reg], registration);
350 /* Now check if that was the last item in any of the listened to stats */
351 for (i = 0; i < STATS_REG_N_IDX; i++)
353 elts += pool_elts (sm->stats_registrations[i]);
/*
 * Build and return a vector containing a copy of every client registered
 * for (reg, item); the pool_foreach copies each element with vec_add1.
 * NOTE(review): fragment — the hash-miss return, braces and the final
 * "return clients" are missing from this corrupted listing; the
 * pool_foreach body is also truncated mid-macro.
 */
358 vpe_client_registration_t *
359 get_clients_for_stat (u32 reg, u32 item)
361 stats_main_t *sm = &stats_main;
362 vpe_client_registration_t *client, *clients = 0;
363 vpe_client_stats_registration_t *registration;
366 /* Is there anything listening for item in that reg */
367 p = hash_get (sm->stats_registration_hash[reg], item);
372 /* If there is, is our client_index one of them */
373 registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
375 vec_reset_length (clients);
376 pool_foreach (client, registration->clients, (
378 vec_add1 (clients, *client);}
/*
 * Free the per-stat-type interest vectors: the stats.reg x-macro expands
 * stats_reg(n) into one vec_free per registered stat index, then the
 * containing vector itself is freed.
 */
385 clear_client_reg (u32 ** registrations)
387 /* When registrations[x] is a vector of pool indices
388 here is a good place to clean up the pools
390 #define stats_reg(n) vec_free(registrations[IDX_##n]);
391 #include <vpp/stats/stats.reg>
394 vec_free (registrations);
/*
 * Allocate/initialise the per-stat-type interest vectors: validates the
 * outer vector to STATS_REG_N_IDX entries, then resets each per-index
 * vector via the stats.reg x-macro.  Returns the (possibly reallocated)
 * vector to the caller.
 * NOTE(review): fragment — comment delimiters and an "#undef stats_reg"
 * are likely on lines dropped from this corrupted listing.
 */
398 init_client_reg (u32 ** registrations)
402 Initialise the stats registrations for each
403 type of stat a client can register for as well as
404 a vector of "interested" indexes.
405 Initially this is a u32 of either sw_if_index or fib_index
406 but eventually this should migrate to a pool_index (u32)
407 with a type specific pool that can include more complex things
408 such as timing and structured events.
410 vec_validate (registrations, STATS_REG_N_IDX);
411 #define stats_reg(n) \
412 vec_reset_length(registrations[IDX_##n]);
413 #include <vpp/stats/stats.reg>
417 When registrations[x] is a vector of pool indices, here
418 is a good place to init the pools.
420 return registrations;
/*
 * Mark every known stat as interesting by appending the ~0 wildcard to
 * each per-stat-type interest vector (stats.reg x-macro expansion).
 * Flagged in the original as a candidate for deprecation.
 */
424 enable_all_client_reg (u32 ** registrations)
428 Enable all stats known by adding
429 ~0 to the index vector. Eventually this
430 should be deprecated.
432 #define stats_reg(n) \
433 vec_add1(registrations[IDX_##n], ~0);
434 #include <vpp/stats/stats.reg>
436 return registrations;
/*
 * Collect every simple (single-u64) interface counter and ship it to the
 * main-thread API input queue in VNET_INTERFACE_SIMPLE_COUNTERS messages
 * of at most SIMPLE_COUNTER_BATCH_SIZE entries each.  Values are stored
 * unaligned in network byte order.  The interface counter lock is held
 * for the whole pass to keep the counter vectors from moving.
 * NOTE(review): fragment — braces, the mp==0 allocation guard, the
 * mp->count increment and loop bookkeeping are missing from this
 * corrupted listing.
 */
440 do_simple_interface_counters (stats_main_t * sm)
442 vl_api_vnet_interface_simple_counters_t *mp = 0;
443 vnet_interface_main_t *im = sm->interface_main;
444 api_main_t *am = sm->api_main;
445 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
446 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
447 vlib_simple_counter_main_t *cm;
448 u32 items_this_message = 0;
453 * Prevent interface registration from expanding / moving the vectors...
454 * That tends never to happen, so we can hold this lock for a while.
456 vnet_interface_counter_lock (im);
458 vec_foreach (cm, im->sw_if_counters)
460 n_counts = vlib_simple_counter_n_counters (cm);
461 for (i = 0; i < n_counts; i++)
465 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
468 mp = vl_msg_api_alloc_as_if_client
469 (sizeof (*mp) + items_this_message * sizeof (v));
470 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
471 mp->vnet_counter_type = cm - im->sw_if_counters;
472 mp->first_sw_if_index = htonl (i);
474 vp = (u64 *) mp->data;
476 v = vlib_get_simple_counter (cm, i);
477 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
480 if (mp->count == items_this_message)
482 mp->count = htonl (items_this_message);
483 /* Send to the main thread... */
484 vl_msg_api_send_shmem (q, (u8 *) & mp);
490 vnet_interface_counter_unlock (im);
/*
 * Common enable/disable path for all want_* handlers.
 * Looks up the existing registration of this client for (stat, item);
 * disable with no registration just warns, disable with one clears it,
 * enable records the client and lets set_client_for_stat() decide whether
 * the background poller should run.
 * NOTE(review): fragment — braces, returns and the retval plumbing are
 * missing from this corrupted listing.
 */
494 handle_client_registration (vpe_client_registration_t * client, u32 stat,
495 u32 item, int enable_disable)
497 stats_main_t *sm = &stats_main;
498 vpe_client_registration_t *rp, _rp;
500 rp = get_client_for_stat (stat, item, client->client_index);
503 if (enable_disable == 0)
505 if (!rp) // No client to disable
507 clib_warning ("pid %d: already disabled for stats...",
512 clear_client_for_stat (stat, item, client->client_index);
519 rp->client_index = client->client_index;
520 rp->client_pid = client->client_pid;
521 sm->enable_poller = set_client_for_stat (stat, item, rp);
526 /**********************************
527 * ALL Interface Combined stats - to be deprecated
528 **********************************/
531 * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
/*
 * Legacy "all interfaces" combined-stats subscription: implemented by
 * registering the client for the per-interface stat with the ~0 wildcard
 * sw_if_index, then replying.  If the client's input queue has gone away
 * (abrupt disconnect) the registration is cleared instead.
 * NOTE(review): fragment — braces, the retval initialisation and the
 * q==NULL guard are missing from this corrupted listing.
 */
534 vl_api_want_interface_combined_stats_t_handler
535 (vl_api_want_interface_combined_stats_t * mp)
537 stats_main_t *sm = &stats_main;
538 vpe_client_registration_t rp;
539 vl_api_want_interface_combined_stats_reply_t *rmp;
542 unix_shared_memory_queue_t *q;
545 swif = ~0; //Using same mechanism as _per_interface_
546 rp.client_index = mp->client_index;
547 rp.client_pid = mp->pid;
549 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
553 q = vl_api_client_index_to_input_queue (mp->client_index);
558 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
563 rmp = vl_msg_api_alloc (sizeof (*rmp));
564 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
565 rmp->context = mp->context;
566 rmp->retval = retval;
568 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * Fan a combined-counters message out to every client registered for the
 * ~0 (all interfaces) item.  The message is duplicated per extra client
 * queue with room; the original mp is reused for the last send (q_prev)
 * and freed when no queue accepted it.  Also dumps a formatted copy to
 * stdout via format_vnet_combined_counters.
 * NOTE(review): fragment — braces, the q_prev = q updates and the loop
 * body boundaries are missing from this corrupted listing.
 */
572 vl_api_vnet_interface_combined_counters_t_handler
573 (vl_api_vnet_interface_combined_counters_t * mp)
575 vpe_client_registration_t *clients, client;
576 stats_main_t *sm = &stats_main;
577 unix_shared_memory_queue_t *q, *q_prev = NULL;
578 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
582 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
585 get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
586 ~0 /*flag for all */ );
588 for (i = 0; i < vec_len (clients); i++)
591 q = vl_api_client_index_to_input_queue (client.client_index);
594 if (q_prev && (q_prev->cursize < q_prev->maxsize))
596 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
597 clib_memcpy (mp_copy, mp, mp_size);
598 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
605 fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
608 if (q_prev && (q_prev->cursize < q_prev->maxsize))
610 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
614 vl_msg_api_free (mp);
/*
 * Collect every combined (packets+bytes) interface counter and ship it to
 * the main-thread API input queue in VNET_INTERFACE_COMBINED_COUNTERS
 * messages of at most COMBINED_COUNTER_BATCH_SIZE entries.  Both fields
 * are stored unaligned in network byte order.  Interface counter lock is
 * held for the whole pass.
 * NOTE(review): fragment — braces, the mp==0 allocation guard, the
 * mp->count increment and vp advance are missing from this corrupted
 * listing.
 */
619 do_combined_interface_counters (stats_main_t * sm)
621 vl_api_vnet_interface_combined_counters_t *mp = 0;
622 vnet_interface_main_t *im = sm->interface_main;
623 api_main_t *am = sm->api_main;
624 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
625 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
626 vlib_combined_counter_main_t *cm;
627 u32 items_this_message = 0;
628 vlib_counter_t v, *vp = 0;
631 vnet_interface_counter_lock (im);
633 vec_foreach (cm, im->combined_sw_if_counters)
635 n_counts = vlib_combined_counter_n_counters (cm);
636 for (i = 0; i < n_counts; i++)
640 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
643 mp = vl_msg_api_alloc_as_if_client
644 (sizeof (*mp) + items_this_message * sizeof (v));
645 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
646 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
647 mp->first_sw_if_index = htonl (i);
649 vp = (vlib_counter_t *) mp->data;
651 vlib_get_combined_counter (cm, i, &v);
652 clib_mem_unaligned (&vp->packets, u64)
653 = clib_host_to_net_u64 (v.packets);
654 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
657 if (mp->count == items_this_message)
659 mp->count = htonl (items_this_message);
660 /* Send to the main thread... */
661 vl_msg_api_send_shmem (q, (u8 *) & mp);
667 vnet_interface_counter_unlock (im);
670 /**********************************
671 * Per Interface Combined stats
672 **********************************/
674 /* Request from client registering interfaces it wants */
/*
 * Per-interface combined-stats subscription handler.
 * Pass 1 validates every requested sw_if_index (rejecting frees in the
 * interface pool with INVALID_SW_IF_INDEX); pass 2 registers/unregisters
 * the client per interface; if the client's input queue has vanished the
 * registrations are cleared; finally a reply carrying retval is sent.
 * NOTE(review): fragment — braces, the retval initialisation, the ~0
 * wildcard special-case and the q==NULL guard are missing from this
 * corrupted listing.
 */
676 vl_api_want_per_interface_combined_stats_t_handler
677 (vl_api_want_per_interface_combined_stats_t * mp)
679 stats_main_t *sm = &stats_main;
680 vpe_client_registration_t rp;
681 vl_api_want_per_interface_combined_stats_reply_t *rmp;
682 vlib_combined_counter_main_t *cm;
685 unix_shared_memory_queue_t *q;
689 // Validate we have good sw_if_indexes before registering
690 for (i = 0; i < mp->num; i++)
692 swif = mp->sw_ifs[i];
694 /* Check its a real sw_if_index that the client is allowed to see */
697 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
699 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
705 for (i = 0; i < mp->num; i++)
707 swif = mp->sw_ifs[i];
709 rp.client_index = mp->client_index;
710 rp.client_pid = mp->pid;
711 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
712 swif, mp->enable_disable);
716 q = vl_api_client_index_to_input_queue (mp->client_index);
720 for (i = 0; i < mp->num; i++)
722 swif = mp->sw_ifs[i];
724 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
730 rmp = vl_msg_api_alloc (sizeof (*rmp));
731 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
732 rmp->context = mp->context;
733 rmp->retval = retval;
735 vl_msg_api_send_shmem (q, (u8 *) & rmp);
738 /* Per Interface Combined distribution to client */
/*
 * Background distribution of per-interface combined (rx/tx packets+bytes)
 * counters.  Snapshots the registration pool into sm->regs_tmp (the ~0
 * wildcard item is redirected to do_combined_interface_counters()), then
 * for each registration snapshots its clients into sm->clients_tmp and
 * sends each live client a one-entry timestamped
 * VNET_PER_INTERFACE_COMBINED_COUNTERS message; clients whose input queue
 * has vanished are unregistered.  Interface counter lock held except
 * around the wildcard detour.
 * NOTE(review): heavily fragmented corrupted listing — braces, the
 * pool_foreach bodies, the q==NULL guard and several statements are
 * missing; recover the full function from upstream before editing.
 */
740 do_combined_per_interface_counters (stats_main_t * sm)
742 vl_api_vnet_per_interface_combined_counters_t *mp = 0;
743 vnet_interface_main_t *im = sm->interface_main;
744 api_main_t *am = sm->api_main;
745 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
746 unix_shared_memory_queue_t *q = NULL;
747 vlib_combined_counter_main_t *cm;
749 * items_this_message will eventually be used to optimise the batching
750 * of per client messages for each stat. For now setting this to 1 then
751 * iterate. This will not affect API.
753 * FIXME instead of enqueueing here, this should be sent to a batch
754 * storer for per-client transmission. Each "mp" sent would be a single entry
755 * and if a client is listening to other sw_if_indexes for same, it would be
756 * appended to that *mp
758 u32 items_this_message = 1;
759 vnet_combined_counter_t *vp = 0;
763 vpe_client_stats_registration_t *reg;
764 vpe_client_registration_t *client;
765 u32 *sw_if_index = 0;
769 - capturing the timestamp of the counters "when VPP knew them" is important.
770 Less so is that the timing of the delivery to the control plane be in the same
773 i.e. As long as the control plane can delta messages from VPP and work out
774 velocity etc based on the timestamp, it can do so in a more "batch mode".
776 It would be beneficial to keep a "per-client" message queue, and then
777 batch all the stat messages for a client into one message, with
780 Given this particular API is for "per interface" one assumes that the scale
781 is less than the ~0 case, which the prior API is suited for.
783 vnet_interface_counter_lock (im);
785 timestamp = vlib_time_now (sm->vlib_main);
787 vec_reset_length (sm->regs_tmp);
789 sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
792 vec_add1 (sm->regs_tmp, reg);}));
794 for (i = 0; i < vec_len (sm->regs_tmp); i++)
796 reg = sm->regs_tmp[i];
799 vnet_interface_counter_unlock (im);
800 do_combined_interface_counters (sm);
801 vnet_interface_counter_lock (im);
804 vec_reset_length (sm->clients_tmp);
805 pool_foreach (client, reg->clients, (
807 vec_add1 (sm->clients_tmp,
811 //FIXME - should be doing non-variant part of mp here and managing
812 // any alloc per client in that vec_foreach
813 for (j = 0; j < vec_len (sm->clients_tmp); j++)
815 client = sm->clients_tmp[j];
816 q = vl_api_client_index_to_input_queue (client->client_index);
818 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
822 clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
823 reg->item, client->client_index);
827 mp = vl_msg_api_alloc (sizeof (*mp) +
828 (items_this_message *
829 (sizeof (*vp) /* rx */ )));
831 // FIXME when optimising for items_this_message > 1 need to include a
832 // SIMPLE_INTERFACE_BATCH_SIZE check.
834 ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
836 mp->count = items_this_message;
837 mp->timestamp = timestamp;
838 vp = (vnet_combined_counter_t *) mp->data;
840 vp->sw_if_index = htonl (reg->item);
842 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
843 vlib_get_combined_counter (cm, reg->item, &v);
844 clib_mem_unaligned (&vp->rx_packets, u64)
845 = clib_host_to_net_u64 (v.packets);
846 clib_mem_unaligned (&vp->rx_bytes, u64) =
847 clib_host_to_net_u64 (v.bytes);
850 /* TX vlib_counter_t packets/bytes */
851 cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
852 vlib_get_combined_counter (cm, reg->item, &v);
853 clib_mem_unaligned (&vp->tx_packets, u64)
854 = clib_host_to_net_u64 (v.packets);
855 clib_mem_unaligned (&vp->tx_bytes, u64) =
856 clib_host_to_net_u64 (v.bytes);
858 vl_msg_api_send_shmem (q, (u8 *) & mp);
862 vnet_interface_counter_unlock (im);
865 /**********************************
866 * Per Interface simple stats
867 **********************************/
869 /* Request from client registering interfaces it wants */
/*
 * Per-interface simple-stats subscription handler: mirrors the combined
 * variant — validate each requested sw_if_index, register/unregister the
 * client per interface, clear registrations when the client's input queue
 * has gone away, then reply with retval.
 * NOTE(review): fragment — braces, retval initialisation and the q==NULL
 * guard are missing from this corrupted listing.
 */
871 vl_api_want_per_interface_simple_stats_t_handler
872 (vl_api_want_per_interface_simple_stats_t * mp)
874 stats_main_t *sm = &stats_main;
875 vpe_client_registration_t rp;
876 vl_api_want_per_interface_simple_stats_reply_t *rmp;
877 vlib_simple_counter_main_t *cm;
880 unix_shared_memory_queue_t *q;
884 for (i = 0; i < mp->num; i++)
886 swif = mp->sw_ifs[i];
888 /* Check its a real sw_if_index that the client is allowed to see */
891 if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
893 retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
899 for (i = 0; i < mp->num; i++)
901 swif = mp->sw_ifs[i];
903 rp.client_index = mp->client_index;
904 rp.client_pid = mp->pid;
905 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
906 swif, mp->enable_disable);
910 q = vl_api_client_index_to_input_queue (mp->client_index);
912 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
915 for (i = 0; i < mp->num; i++)
917 swif = mp->sw_ifs[i];
919 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
927 rmp = vl_msg_api_alloc (sizeof (*rmp));
928 rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
929 rmp->context = mp->context;
930 rmp->retval = retval;
932 vl_msg_api_send_shmem (q, (u8 *) & rmp);
935 /* Per Interface Simple distribution to client */
/*
 * Background distribution of per-interface simple counters.
 * Mirrors do_combined_per_interface_counters(): snapshot registrations
 * into sm->regs_tmp (~0 wildcard detours to do_simple_interface_counters),
 * snapshot each registration's clients into sm->clients_tmp, then send
 * every live client one timestamped VNET_PER_INTERFACE_SIMPLE_COUNTERS
 * message populated with the drop/punt/ip4/ip6/rx-no-buf/rx-miss/
 * rx-error/tx-error/mpls counters for the registered interface, all in
 * network byte order via unaligned stores.  Dead client queues trigger
 * unregistration instead.
 * NOTE(review): heavily fragmented corrupted listing — braces,
 * pool_foreach bodies, the q==NULL guard and loop boundaries are missing;
 * recover the full function from upstream before editing.
 */
937 do_simple_per_interface_counters (stats_main_t * sm)
939 vl_api_vnet_per_interface_simple_counters_t *mp = 0;
940 vnet_interface_main_t *im = sm->interface_main;
941 api_main_t *am = sm->api_main;
942 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
943 unix_shared_memory_queue_t *q = NULL;
944 vlib_simple_counter_main_t *cm;
946 * items_this_message will eventually be used to optimise the batching
947 * of per client messages for each stat. For now setting this to 1 then
948 * iterate. This will not affect API.
950 * FIXME instead of enqueueing here, this should be sent to a batch
951 * storer for per-client transmission. Each "mp" sent would be a single entry
952 * and if a client is listening to other sw_if_indexes for same, it would be
953 * appended to that *mp
955 u32 items_this_message = 1;
957 vpe_client_stats_registration_t *reg;
958 vpe_client_registration_t *client;
961 vnet_simple_counter_t *vp = 0;
966 - capturing the timestamp of the counters "when VPP knew them" is important.
967 Less so is that the timing of the delivery to the control plane be in the same
970 i.e. As long as the control plane can delta messages from VPP and work out
971 velocity etc based on the timestamp, it can do so in a more "batch mode".
973 It would be beneficial to keep a "per-client" message queue, and then
974 batch all the stat messages for a client into one message, with
977 Given this particular API is for "per interface" one assumes that the scale
978 is less than the ~0 case, which the prior API is suited for.
980 vnet_interface_counter_lock (im);
982 timestamp = vlib_time_now (sm->vlib_main);
984 vec_reset_length (sm->regs_tmp);
986 sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], (
992 for (i = 0; i < vec_len (sm->regs_tmp); i++)
994 reg = sm->regs_tmp[i];
997 vnet_interface_counter_unlock (im);
998 do_simple_interface_counters (sm);
999 vnet_interface_counter_lock (im);
1002 vec_reset_length (sm->clients_tmp);
1003 pool_foreach (client, reg->clients, (
1005 vec_add1 (sm->clients_tmp,
1009 //FIXME - should be doing non-variant part of mp here and managing
1010 // any alloc per client in that vec_foreach
1011 for (j = 0; j < vec_len (sm->clients_tmp); j++)
1013 client = sm->clients_tmp[j];
1014 q = vl_api_client_index_to_input_queue (client->client_index);
1016 //Client may have disconnected abrubtly, clean up so we don't poll nothing.
1020 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1021 reg->item, client->client_index);
1025 size = (sizeof (*mp) + (items_this_message * (sizeof (u64) * 10)));
1026 mp = vl_msg_api_alloc (size);
1027 // FIXME when optimising for items_this_message > 1 need to include a
1028 // SIMPLE_INTERFACE_BATCH_SIZE check.
1029 mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1031 mp->count = items_this_message;
1032 mp->timestamp = timestamp;
1033 vp = (vnet_simple_counter_t *) mp->data;
1035 vp->sw_if_index = htonl (reg->item);
1037 //FIXME will be simpler with a preprocessor macro
1038 // VNET_INTERFACE_COUNTER_DROP
1039 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
1040 v = vlib_get_simple_counter (cm, reg->item);
1041 clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1043 // VNET_INTERFACE_COUNTER_PUNT
1044 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_PUNT;
1045 v = vlib_get_simple_counter (cm, reg->item);
1046 clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1048 // VNET_INTERFACE_COUNTER_IP4
1049 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP4;
1050 v = vlib_get_simple_counter (cm, reg->item);
1051 clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1053 //VNET_INTERFACE_COUNTER_IP6
1054 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_IP6;
1055 v = vlib_get_simple_counter (cm, reg->item);
1056 clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1058 //VNET_INTERFACE_COUNTER_RX_NO_BUF
1059 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_NO_BUF;
1060 v = vlib_get_simple_counter (cm, reg->item);
1061 clib_mem_unaligned (&vp->rx_no_buffer, u64) =
1062 clib_host_to_net_u64 (v);
1064 //VNET_INTERFACE_COUNTER_RX_MISS
1065 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_MISS;
1066 v = vlib_get_simple_counter (cm, reg->item);
1067 clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1069 //VNET_INTERFACE_COUNTER_RX_ERROR
1070 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_RX_ERROR;
1071 v = vlib_get_simple_counter (cm, reg->item);
1072 clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1074 //VNET_INTERFACE_COUNTER_TX_ERROR
1075 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_TX_ERROR;
1076 v = vlib_get_simple_counter (cm, reg->item);
1077 clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1079 //VNET_INTERFACE_COUNTER_MPLS
1080 cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_MPLS;
1081 v = vlib_get_simple_counter (cm, reg->item);
1082 clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1084 vl_msg_api_send_shmem (q, (u8 *) & mp);
1088 vnet_interface_counter_unlock (im);
1091 /**********************************
1093 **********************************/
/*
 * Sleep for sec seconds + nsec nanoseconds using nanosleep(2), retrying
 * with the remaining time (rem) after EINTR; other nanosleep failures are
 * logged with clib_unix_warning.  Used to back off when a client's
 * shared-memory queue is full (see STATS_RELEASE_DELAY_NS).
 * NOTE(review): fragment — tv_sec assignment, the retry loop structure and
 * braces are missing from this corrupted listing.
 */
1096 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
1098 struct timespec _req, *req = &_req;
1099 struct timespec _rem, *rem = &_rem;
1102 req->tv_nsec = nsec;
1105 if (nanosleep (req, rem) == 0)
1110 clib_unix_warning ("nanosleep");
1116 * @brief The context passed when collecting adjacency counters
1118 typedef struct ip4_nbr_stats_ctx_t_
1121 * The SW IF index all these adjs belong to
/* sw_if_index member declaration missing from this corrupted listing. */
1126 * A vector of ip4 nbr counters
1128 vl_api_ip4_nbr_counter_t *counters;
1129 } ip4_nbr_stats_ctx_t;
/*
 * adj_nbr_walk callback: for each IPv4 neighbour adjacency with a non-zero
 * packet count, append one counter entry (packets/bytes host->net, plus
 * the adjacency's next-hop address and link type) to the walk context's
 * vector.  Always continues the walk.
 * NOTE(review): fragment — braces, the ctx/adj assignments from arg/ai
 * are missing from this corrupted listing.
 */
1131 static adj_walk_rc_t
1132 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
1134 vl_api_ip4_nbr_counter_t *vl_counter;
1135 vlib_counter_t adj_counter;
1136 ip4_nbr_stats_ctx_t *ctx;
1137 ip_adjacency_t *adj;
1140 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
1142 if (0 != adj_counter.packets)
1144 vec_add2 (ctx->counters, vl_counter, 1);
1147 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1148 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1149 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1150 vl_counter->link_type = adj->ia_link;
1152 return (ADJ_WALK_RC_CONTINUE);
1155 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * Drain the IPv4 neighbour-counter vector accumulated in ctx by the walk:
 * repeatedly take up to IP4_FIB_COUNTER_BATCH_SIZE entries from the BACK
 * of the vector (order to the client is not significant), wrap them in a
 * VNET_IP4_NBR_COUNTERS message under the stats lock, and enqueue to the
 * main-thread queue.  When the queue is full, sleep
 * STATS_RELEASE_DELAY_NS so the reader can catch up.
 * NOTE(review): fragment — braces, the clib_memcpy destination line and
 * the dsunlock pairing are missing from this corrupted listing.  Also
 * note mp->count/sw_if_index use ntohl where htonl is conventional
 * (identical on the wire, but worth confirming upstream).
 */
1158 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
1160 api_main_t *am = sm->api_main;
1161 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1162 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
1163 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
1167 * If the walk context has counters, which may be left over from the last
1168 * suspend, then we continue from there.
1170 while (0 != vec_len (ctx->counters))
1172 u32 n_items = MIN (vec_len (ctx->counters),
1173 IP4_FIB_COUNTER_BATCH_SIZE);
1176 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1178 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1181 (vl_api_ip4_nbr_counter_t)));
1182 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1183 mp->count = ntohl (n_items);
1184 mp->sw_if_index = ntohl (ctx->sw_if_index);
1189 * copy the counters from the back of the context, then we can easily
1190 * 'erase' them by resetting the vector length.
1191 * The order we push the stats to the caller is not important.
1194 &ctx->counters[vec_len (ctx->counters) - n_items],
1195 n_items * sizeof (*ctx->counters));
1197 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1202 unix_shared_memory_queue_lock (q);
1203 pause = unix_shared_memory_queue_is_full (q);
1205 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1206 unix_shared_memory_queue_unlock (q);
1210 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1211 STATS_RELEASE_DELAY_NS);
/*
 * Top-level IPv4 neighbour-counter pass: for every software interface,
 * walk its neighbour adjacencies under the stats lock collecting non-zero
 * counters into a fresh context (asserted empty between interfaces), then
 * ship whatever was collected via ip4_nbr_ship().
 * NOTE(review): fragment — the ctx initialiser body, pool_foreach braces,
 * the adj_nbr_walk callback/context arguments and the closing of the
 * pool_foreach are missing from this corrupted listing.
 */
1216 do_ip4_nbr_counters (stats_main_t * sm)
1218 vnet_main_t *vnm = vnet_get_main ();
1219 vnet_interface_main_t *im = &vnm->interface_main;
1220 vnet_sw_interface_t *si;
1222 ip4_nbr_stats_ctx_t ctx = {
1228 pool_foreach (si, im->sw_interfaces,
1231 * update the interface we are now concerned with
1233 ctx.sw_if_index = si->sw_if_index;
1236 * we are about to walk another interface, so we shouldn't have any pending
1239 ASSERT(ctx.counters == NULL);
1242 * visit each neighbour adjacency on the interface and collect
1243 * its current stats.
1244 * Because we hold the lock the walk is synchronous, so safe to routing
1245 * updates. It's limited in work by the number of adjacenies on an
1246 * interface, which is typically not huge.
1248 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1249 adj_nbr_walk (si->sw_if_index,
1256 * if this interface has some adjacencies with counters then ship them,
1257 * else continue to the next interface.
1259 if (NULL != ctx.counters)
1261 ip4_nbr_ship(sm, &ctx);
1268 * @brief The context passed when collecting adjacency counters
1270 typedef struct ip6_nbr_stats_ctx_t_
1273 * The SW IF index all these adjs belong to
/* sw_if_index member declaration missing from this corrupted listing. */
1278 * A vector of ip6 nbr counters
1280 vl_api_ip6_nbr_counter_t *counters;
1281 } ip6_nbr_stats_ctx_t;
/*
 * adj_nbr_walk callback, IPv6 flavour: for each neighbour adjacency with
 * a non-zero packet count, append one counter entry (packets/bytes
 * host->net, the 128-bit next-hop as two u64 halves, and link type) to
 * the walk context's vector.  Always continues the walk.
 * NOTE(review): fragment — the arg parameter, braces and the ctx/adj
 * assignments are missing from this corrupted listing.
 */
1283 static adj_walk_rc_t
1284 ip6_nbr_stats_cb (adj_index_t ai,
1287 vl_api_ip6_nbr_counter_t *vl_counter;
1288 vlib_counter_t adj_counter;
1289 ip6_nbr_stats_ctx_t *ctx;
1290 ip_adjacency_t *adj;
1293 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
1295 if (0 != adj_counter.packets)
1297 vec_add2(ctx->counters, vl_counter, 1);
1300 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1301 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1302 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1303 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1304 vl_counter->link_type = adj->ia_link;
1306 return (ADJ_WALK_RC_CONTINUE);
1309 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * IPv6 twin of ip4_nbr_ship(): drain the collected neighbour counters in
 * batches of up to IP6_FIB_COUNTER_BATCH_SIZE from the back of the
 * context vector into VNET_IP6_NBR_COUNTERS messages, enqueue to the
 * main-thread queue under the stats lock, and back off
 * STATS_RELEASE_DELAY_NS when the queue is full.
 * NOTE(review): fragment — braces, the clib_memcpy destination line and
 * the dsunlock pairing are missing from this corrupted listing; the MIN
 * macro is also re-defined here (duplicate of the ip4 definition above).
 */
1312 ip6_nbr_ship (stats_main_t * sm,
1313 ip6_nbr_stats_ctx_t *ctx)
1315 api_main_t *am = sm->api_main;
1316 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1317 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
1318 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
1322 * If the walk context has counters, which may be left over from the last
1323 * suspend, then we continue from there.
1325 while (0 != vec_len(ctx->counters))
1327 u32 n_items = MIN (vec_len (ctx->counters),
1328 IP6_FIB_COUNTER_BATCH_SIZE);
1331 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1333 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1336 (vl_api_ip6_nbr_counter_t)));
1337 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1338 mp->count = ntohl (n_items);
1339 mp->sw_if_index = ntohl (ctx->sw_if_index);
1344 * copy the counters from the back of the context, then we can easily
1345 * 'erase' them by resetting the vector length.
1346 * The order we push the stats to the caller is not important.
1349 &ctx->counters[vec_len (ctx->counters) - n_items],
1350 n_items * sizeof (*ctx->counters));
1352 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1357 unix_shared_memory_queue_lock (q);
1358 pause = unix_shared_memory_queue_is_full (q);
1360 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1361 unix_shared_memory_queue_unlock (q);
1365 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1366 STATS_RELEASE_DELAY_NS);
/*
 * Collect and ship IPv6 neighbour counters for every software
 * interface.  For each interface: take the data-structure lock, walk
 * all neighbour adjacencies with ip6_nbr_stats_cb (synchronous, so
 * safe against routing updates while the lock is held), then ship any
 * collected counters via ip6_nbr_ship.  Mirrors the ip4 variant above.
 */
1371 do_ip6_nbr_counters (stats_main_t * sm)
1373 vnet_main_t *vnm = vnet_get_main ();
1374 vnet_interface_main_t *im = &vnm->interface_main;
1375 vnet_sw_interface_t *si;
1377 ip6_nbr_stats_ctx_t ctx = {
1383 pool_foreach (si, im->sw_interfaces,
1386 * update the interface we are now concerned with
1388 ctx.sw_if_index = si->sw_if_index;
1391 * we are about to walk another interface, so we shouldn't have any pending
/* ship() drains the vector completely, so this must hold here */
1394 ASSERT(ctx.counters == NULL);
1397 * visit each neighbour adjacency on the interface and collect
1398 * its current stats.
1399 * Because we hold the lock the walk is synchronous, so safe to routing
1400 * updates. It's limited in work by the number of adjacenies on an
1401 * interface, which is typically not huge.
1403 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1404 adj_nbr_walk (si->sw_if_index,
1411 * if this interface has some adjacencies with counters then ship them,
1412 * else continue to the next interface.
1414 if (NULL != ctx.counters)
1416 ip6_nbr_ship(sm, &ctx);
/*
 * Collect per-route IPv4 FIB counters and ship them to the main
 * thread's API input queue in IP4_FIB_COUNTER_BATCH_SIZE batches.
 *
 * Outline: snapshot the fib pool into do_fibs->fibs, then for each
 * fib (1) under the data-structure lock, flatten the per-prefix-length
 * hash tables into a flat ip4routes vector, (2) for each route, read
 * its load-balance combined counter and append any non-zero entry to
 * the in-flight message, flushing whenever the batch fills.
 *
 * Control-plane cooperation: whenever the main thread sets
 * data_structure_lock->release_hint, the walk records the current fib
 * index in start_at_fib_index, drops the lock, sleeps briefly and
 * restarts from that fib -- which is why the in-flight message/counter
 * pointer is reset after every such bail-out.
 *
 * NOTE(review): several statements (loop bodies, 'goto again'-style
 * resume labels, the queue-full pause branch) are elided in this
 * fragment; the annotations below only describe what the visible
 * lines establish.
 */
1423 do_ip4_fib_counters (stats_main_t * sm)
1425 ip4_main_t *im4 = &ip4_main;
1426 api_main_t *am = sm->api_main;
1427 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1428 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
1432 do_ip46_fibs_t *do_fibs;
1433 vl_api_vnet_ip4_fib_counters_t *mp = 0;
1434 u32 items_this_message;
1435 vl_api_ip4_fib_counter_t *ctrp = 0;
1436 u32 start_at_fib_index = 0;
1439 do_fibs = &sm->do_ip46_fibs;
/* snapshot the fib pool so we can iterate without holding it live */
1442 vec_reset_length (do_fibs->fibs);
1444 pool_foreach (fib, im4->fibs,
1445 ({vec_add1(do_fibs->fibs,fib);}));
1449 for (j = 0; j < vec_len (do_fibs->fibs); j++)
1451 fib = do_fibs->fibs[j];
1452 /* We may have bailed out due to control-plane activity */
1453 while ((fib - im4->fibs) < start_at_fib_index)
1456 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
/* lazily allocate a fresh batch message for this fib */
1460 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1461 mp = vl_msg_api_alloc_as_if_client
1463 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1464 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1466 mp->vrf_id = ntohl (fib->ft_table_id);
1467 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1471 /* happens if the last FIB was empty... */
1472 ASSERT (mp->count == 0);
1473 mp->vrf_id = ntohl (fib->ft_table_id);
1476 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1478 vec_reset_length (do_fibs->ip4routes);
1479 vec_reset_length (do_fibs->results);
/* flatten the per-mask-length dst-address hashes into ip4routes */
1481 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1483 uword *hash = v4_fib->fib_entry_by_dst_address[i];
1487 vec_reset_length (do_fibs->pvec);
1489 x.address_length = i;
1491 hash_foreach_pair (p, hash, (
1493 vec_add1 (do_fibs->pvec, p);}
1495 for (k = 0; k < vec_len (do_fibs->pvec); k++)
1497 p = do_fibs->pvec[k];
1498 x.address.data_u32 = p->key;
1499 x.index = p->value[0];
1501 vec_add1 (do_fibs->ip4routes, x);
/* control plane wants the lock: remember where we were and restart */
1502 if (sm->data_structure_lock->release_hint)
1504 start_at_fib_index = fib - im4->fibs;
1506 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1507 STATS_RELEASE_DELAY_NS);
1509 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1515 vec_foreach (r, do_fibs->ip4routes)
1518 const dpo_id_t *dpo_id;
1521 dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1522 index = (u32) dpo_id->dpoi_index;
1524 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1527 * If it has actually
1528 * seen at least one packet, send it.
1533 /* already in net byte order */
1534 ctrp->address = r->address.as_u32;
1535 ctrp->address_length = r->address_length;
1536 ctrp->packets = clib_host_to_net_u64 (c.packets);
1537 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: flush it and start a new message */
1541 if (mp->count == items_this_message)
1543 mp->count = htonl (items_this_message);
1545 * If the main thread's input queue is stuffed,
1546 * drop the data structure lock (which the main thread
1547 * may want), and take a pause.
1549 unix_shared_memory_queue_lock (q);
1550 if (unix_shared_memory_queue_is_full (q))
1553 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1554 unix_shared_memory_queue_unlock (q);
1556 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1557 STATS_RELEASE_DELAY_NS);
1560 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1561 unix_shared_memory_queue_unlock (q);
1563 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1564 mp = vl_msg_api_alloc_as_if_client
1566 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1567 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1569 mp->vrf_id = ntohl (fib->ft_table_id);
1570 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1572 } /* for each (mp or single) adj */
1573 if (sm->data_structure_lock->release_hint)
1575 start_at_fib_index = fib - im4->fibs;
1577 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1579 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1582 } /* vec_foreach (routes) */
1586 /* Flush any data from this fib */
1589 mp->count = htonl (mp->count);
1590 vl_msg_api_send_shmem (q, (u8 *) & mp);
1595 /* If e.g. the last FIB had no reportable routes, free the buffer */
1597 vl_msg_api_free (mp);
/*
 * Tail of the bihash-walk argument struct (opening lines elided in
 * this fragment): 'routep' points at the vector into which matching
 * ip6 routes are collected; the struct also carries the target
 * fib_index and the stats_main_t pointer used by add_routes_in_fib.
 */
1603 ip6_route_t **routep;
1605 } add_routes_in_fib_arg_t;
/*
 * Bihash foreach callback: if the key/value pair belongs to the fib
 * we are scanning (fib index lives in the upper 32 bits of key[2]),
 * append an ip6_route_t (address, prefix length from the low byte of
 * key[2], and fib-entry index) to the caller's route vector.
 * If the control plane wants the data-structure lock, abandon the
 * whole walk via longjmp back to do_ip6_fib_counters' setjmp.
 */
1608 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1610 add_routes_in_fib_arg_t *ap = arg;
1611 stats_main_t *sm = ap->sm;
/* non-local exit: the walk cannot be paused, only restarted */
1613 if (sm->data_structure_lock->release_hint)
1614 clib_longjmp (&sm->jmp_buf, 1);
1616 if (kvp->key[2] >> 32 == ap->fib_index)
1618 ip6_address_t *addr;
/* the ip6 address is the leading bytes of the bihash key */
1620 addr = (ip6_address_t *) kvp;
1621 vec_add2 (*ap->routep, r, 1);
1622 r->address = addr[0];
1623 r->address_length = kvp->key[2] & 0xFF;
1624 r->index = kvp->value;
/*
 * IPv6 analogue of do_ip4_fib_counters: collect per-route FIB
 * counters and ship them in IP6_FIB_COUNTER_BATCH_SIZE batches.
 * Route enumeration differs from ip4: routes are gathered by walking
 * the forwarding bihash with add_routes_in_fib under setjmp protection
 * -- the callback longjmps back here when the control plane wants the
 * lock, after which we pause and restart the current fib from
 * start_at_fib_index.
 * NOTE(review): as with the ip4 variant, some statements are elided
 * in this fragment; comments below describe only the visible lines.
 */
1629 do_ip6_fib_counters (stats_main_t * sm)
1631 ip6_main_t *im6 = &ip6_main;
1632 api_main_t *am = sm->api_main;
1633 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
1634 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
1637 do_ip46_fibs_t *do_fibs;
1638 vl_api_vnet_ip6_fib_counters_t *mp = 0;
1639 u32 items_this_message;
1640 vl_api_ip6_fib_counter_t *ctrp = 0;
1641 u32 start_at_fib_index = 0;
/* the FWDING table's hash holds the routes we need to enumerate */
1642 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1643 add_routes_in_fib_arg_t _a, *a = &_a;
1646 do_fibs = &sm->do_ip46_fibs;
/* snapshot the fib pool so we can iterate without holding it live */
1648 vec_reset_length (do_fibs->fibs);
1650 pool_foreach (fib, im6->fibs,
1651 ({vec_add1(do_fibs->fibs,fib);}));
1655 for (i = 0; i < vec_len (do_fibs->fibs); i++)
1657 fib = do_fibs->fibs[i];
1658 /* We may have bailed out due to control-plane activity */
1659 while ((fib - im6->fibs) < start_at_fib_index)
1664 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1665 mp = vl_msg_api_alloc_as_if_client
1667 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1668 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1670 mp->vrf_id = ntohl (fib->ft_table_id);
1671 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1674 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1676 vec_reset_length (do_fibs->ip6routes);
1677 vec_reset_length (do_fibs->results);
1679 a->fib_index = fib - im6->fibs;
1680 a->routep = &do_fibs->ip6routes;
/* setjmp==0: normal path; !=0: callback longjmp'd, pause and retry */
1683 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1685 start_at_fib_index = fib - im6->fibs;
1686 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
1691 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1692 STATS_RELEASE_DELAY_NS);
1694 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1698 vec_foreach (r, do_fibs->ip6routes)
1702 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
1705 * If it has actually
1706 * seen at least one packet, send it.
1710 /* already in net byte order */
1711 ctrp->address[0] = r->address.as_u64[0];
1712 ctrp->address[1] = r->address.as_u64[1];
1713 ctrp->address_length = (u8) r->address_length;
1714 ctrp->packets = clib_host_to_net_u64 (c.packets);
1715 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* batch full: flush it and start a new message */
1719 if (mp->count == items_this_message)
1721 mp->count = htonl (items_this_message);
1723 * If the main thread's input queue is stuffed,
1724 * drop the data structure lock (which the main thread
1725 * may want), and take a pause.
1727 unix_shared_memory_queue_lock (q);
1728 if (unix_shared_memory_queue_is_full (q))
1731 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1732 unix_shared_memory_queue_unlock (q);
1734 ip46_fib_stats_delay (sm, 0 /* sec */ ,
1735 STATS_RELEASE_DELAY_NS);
1738 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1739 unix_shared_memory_queue_unlock (q);
1741 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1742 mp = vl_msg_api_alloc_as_if_client
1744 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1745 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1747 mp->vrf_id = ntohl (fib->ft_table_id);
1748 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* control plane wants the lock: remember where we were and restart */
1752 if (sm->data_structure_lock->release_hint)
1754 start_at_fib_index = fib - im6->fibs;
1756 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1758 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1761 } /* vec_foreach (routes) */
1765 /* Flush any data from this fib */
1768 mp->count = htonl (mp->count);
1769 vl_msg_api_send_shmem (q, (u8 *) & mp);
1774 /* If e.g. the last FIB had no reportable routes, free the buffer */
1776 vl_msg_api_free (mp);
/*
 * Main loop of the dedicated stats pthread.  Blocks all signals, names
 * the thread "<prefix>_stats", switches to the worker's heap, then
 * every 10 seconds -- when the poller is enabled -- runs each
 * collector that has at least one registered client.
 */
1780 stats_thread_fn (void *arg)
1782 stats_main_t *sm = &stats_main;
1783 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
1784 vlib_thread_main_t *tm = vlib_get_thread_main ();
1786 /* stats thread wants no signals. */
1790 pthread_sigmask (SIG_SETMASK, &s, 0);
1793 if (vec_len (tm->thread_prefix))
1794 vlib_set_thread_name ((char *)
1795 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
1797 clib_mem_set_heap (w->thread_mheap);
1801 /* 10 second poll interval */
1802 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
/* nothing registered at all: skip this round entirely */
1804 if (!(sm->enable_poller))
1809 (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
1810 do_combined_per_interface_counters (sm);
1813 (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
1814 do_simple_per_interface_counters (sm);
1816 if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
1817 do_ip4_fib_counters (sm);
1819 if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
1820 do_ip6_fib_counters (sm);
1822 if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
1823 do_ip4_nbr_counters (sm);
1825 if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
1826 do_ip6_nbr_counters (sm);
/*
 * Fan a VNET_INTERFACE_SIMPLE_COUNTERS message out to every client
 * registered for simple interface stats.  Standard fan-out pattern:
 * for each client except the last, clone the message and send the
 * previous copy; the final copy goes out after the loop.  Clients with
 * dead queues are dropped, and full queues cause the client's
 * registration to be cleared.  The original 'mp' is freed if no client
 * could take it (message_bounce is set for this ID in stats_init).
 */
1831 vl_api_vnet_interface_simple_counters_t_handler
1832 (vl_api_vnet_interface_simple_counters_t * mp)
1834 vpe_client_registration_t *clients, client;
1835 stats_main_t *sm = &stats_main;
1836 unix_shared_memory_queue_t *q, *q_prev = NULL;
1837 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
/* variable-length message: count u64 counters follow the header */
1841 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
1844 get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1845 ~0 /*flag for all */ );
1847 for (i = 0; i < vec_len (clients); i++)
1849 client = clients[i];
1850 q = vl_api_client_index_to_input_queue (client.client_index);
/* previous queue has room: clone for this client, send to previous */
1853 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1855 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
1856 clib_memcpy (mp_copy, mp, mp_size);
1857 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
/* client went away (or queue stuffed): deregister it */
1865 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
1866 client.client_index);
1872 fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
/* send the final (unclonned) message to the last live queue */
1875 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1877 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1881 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP4_FIB_COUNTERS message out to all clients registered
 * for ip4 FIB stats (same clone-and-forward pattern as the simple
 * interface counter handler above).  Dead/full client queues clear
 * that client's registration and refresh enable_poller.
 */
1890 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
1892 stats_main_t *sm = &stats_main;
1893 unix_shared_memory_queue_t *q, *q_prev = NULL;
1894 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
1896 vpe_client_registration_t *clients, client;
1899 mp_size = sizeof (*mp_copy) +
1900 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
1903 get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
1905 for (i = 0; i < vec_len (clients); i++)
1907 client = clients[i];
1908 q = vl_api_client_index_to_input_queue (client.client_index);
1911 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1913 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
1914 clib_memcpy (mp_copy, mp, mp_size);
1915 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
/* deregister the vanished client; poller stays on only if others remain */
1922 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
1923 ~0, client.client_index);
1928 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1930 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1934 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP4_NBR_COUNTERS message out to all clients registered
 * for ip4 neighbour stats -- identical clone-and-forward pattern to
 * the FIB counter handlers.
 */
1939 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
1941 stats_main_t *sm = &stats_main;
1942 unix_shared_memory_queue_t *q, *q_prev = NULL;
1943 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
1945 vpe_client_registration_t *clients, client;
1948 mp_size = sizeof (*mp_copy) +
1949 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
1952 get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
1954 for (i = 0; i < vec_len (clients); i++)
1956 client = clients[i];
1957 q = vl_api_client_index_to_input_queue (client.client_index);
1960 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1962 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
1963 clib_memcpy (mp_copy, mp, mp_size);
1964 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1971 sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
1972 ~0, client.client_index);
1978 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1980 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1984 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP6_FIB_COUNTERS message out to all clients registered
 * for ip6 FIB stats -- identical clone-and-forward pattern to the
 * ip4 FIB counter handler.
 */
1989 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
1991 stats_main_t *sm = &stats_main;
1992 unix_shared_memory_queue_t *q, *q_prev = NULL;
1993 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
1995 vpe_client_registration_t *clients, client;
1998 mp_size = sizeof (*mp_copy) +
1999 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2002 get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2004 for (i = 0; i < vec_len (clients); i++)
2006 client = clients[i];
2007 q = vl_api_client_index_to_input_queue (client.client_index);
2010 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2012 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2013 clib_memcpy (mp_copy, mp, mp_size);
2014 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2021 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2022 ~0, client.client_index);
2027 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2029 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2033 vl_msg_api_free (mp);
/*
 * Fan a VNET_IP6_NBR_COUNTERS message out to all clients registered
 * for ip6 neighbour stats -- identical clone-and-forward pattern to
 * the other counter fan-out handlers.
 */
2038 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
2040 stats_main_t *sm = &stats_main;
2041 unix_shared_memory_queue_t *q, *q_prev = NULL;
2042 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
2044 vpe_client_registration_t *clients, client;
2047 mp_size = sizeof (*mp_copy) +
2048 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2051 get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2053 for (i = 0; i < vec_len (clients); i++)
2055 client = clients[i];
2056 q = vl_api_client_index_to_input_queue (client.client_index);
2059 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2061 mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2062 clib_memcpy (mp_copy, mp, mp_size);
2063 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2070 sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2071 ~0, client.client_index);
2076 if (q_prev && (q_prev->cursize < q_prev->maxsize))
2078 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2082 vl_msg_api_free (mp);
/*
 * Legacy WANT_STATS handler: (de)registers the calling client for ALL
 * stat categories at once (item = ~0 wildcard), then sends a
 * WANT_STATS_REPLY back on the client's input queue.
 */
2087 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
2089 stats_main_t *sm = &stats_main;
2090 vpe_client_registration_t rp;
2091 vl_api_want_stats_reply_t *rmp;
2095 unix_shared_memory_queue_t *q;
2097 item = ~0; //"ALL THE THINGS IN THE THINGS
2098 rp.client_index = mp->client_index;
2099 rp.client_pid = mp->pid;
2101 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2102 item, mp->enable_disable);
2104 handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2105 item, mp->enable_disable);
2107 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2108 item, mp->enable_disable);
2110 handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2111 item, mp->enable_disable);
2113 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2114 item, mp->enable_disable);
2116 handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2117 item, mp->enable_disable);
2120 q = vl_api_client_index_to_input_queue (mp->client_index);
2125 rmp = vl_msg_api_alloc (sizeof (*rmp));
2126 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2127 rmp->context = mp->context;
2128 rmp->retval = retval;
2130 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * (De)register the caller for simple interface stats on ALL
 * interfaces (swif = ~0 wildcard) and reply.  If the client's queue
 * cannot be found, its registration is cleared instead.
 */
2134 vl_api_want_interface_simple_stats_t_handler
2135 (vl_api_want_interface_simple_stats_t * mp)
2137 stats_main_t *sm = &stats_main;
2138 vpe_client_registration_t rp;
2139 vl_api_want_interface_simple_stats_reply_t *rmp;
2143 unix_shared_memory_queue_t *q;
2145 swif = ~0; //Using same mechanism as _per_interface_
2146 rp.client_index = mp->client_index;
2147 rp.client_pid = mp->pid;
2149 handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2150 mp->enable_disable);
2153 q = vl_api_client_index_to_input_queue (mp->client_index);
/* no queue: client is gone, drop its registration */
2158 clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2163 rmp = vl_msg_api_alloc (sizeof (*rmp));
2164 rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2165 rmp->context = mp->context;
2166 rmp->retval = retval;
2168 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * (De)register the caller for ip4 FIB stats on ALL fibs (fib = ~0
 * wildcard) and reply with WANT_IP4_FIB_STATS_REPLY.
 */
2173 vl_api_want_ip4_fib_stats_t_handler (vl_api_want_ip4_fib_stats_t * mp)
2175 stats_main_t *sm = &stats_main;
2176 vpe_client_registration_t rp;
2177 vl_api_want_ip4_fib_stats_reply_t *rmp;
2180 unix_shared_memory_queue_t *q;
2183 fib = ~0; //Using same mechanism as _per_interface_
2184 rp.client_index = mp->client_index;
2185 rp.client_pid = mp->pid;
2187 handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2188 mp->enable_disable);
2191 q = vl_api_client_index_to_input_queue (mp->client_index);
/* no queue: client is gone, drop its registration */
2195 sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2196 fib, mp->client_index);
2200 rmp = vl_msg_api_alloc (sizeof (*rmp));
2201 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2202 rmp->context = mp->context;
2203 rmp->retval = retval;
2205 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * (De)register the caller for ip6 FIB stats on ALL fibs (fib = ~0
 * wildcard) and reply with WANT_IP6_FIB_STATS_REPLY.
 * NOTE(review): 'rmp' is declared as vl_api_want_ip4_fib_stats_reply_t
 * in this ip6 handler -- looks like a copy/paste slip.  It is harmless
 * only if the ip4 and ip6 reply layouts are identical; should be
 * changed to vl_api_want_ip6_fib_stats_reply_t.
 */
2209 vl_api_want_ip6_fib_stats_t_handler (vl_api_want_ip6_fib_stats_t * mp)
2211 stats_main_t *sm = &stats_main;
2212 vpe_client_registration_t rp;
2213 vl_api_want_ip4_fib_stats_reply_t *rmp;
2216 unix_shared_memory_queue_t *q;
2219 fib = ~0; //Using same mechanism as _per_interface_
2220 rp.client_index = mp->client_index;
2221 rp.client_pid = mp->pid;
2223 handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2224 mp->enable_disable);
2227 q = vl_api_client_index_to_input_queue (mp->client_index);
/* no queue: client is gone, drop its registration */
2231 sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2232 fib, mp->client_index);
2236 rmp = vl_msg_api_alloc (sizeof (*rmp));
2237 rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2238 rmp->context = mp->context;
2239 rmp->retval = retval;
2241 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * Deliberate stubs: per the FIXME below, the neighbour-stats
 * want-handlers are known-broken and intentionally do nothing until a
 * follow-up patch; bodies are empty (elided in this fragment).
 */
2244 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
2246 vl_api_want_ip4_nbr_stats_t_handler (vl_api_want_ip4_nbr_stats_t * mp)
2251 vl_api_want_ip6_nbr_stats_t_handler (vl_api_want_ip6_nbr_stats_t * mp)
/*
 * Synchronous summary request: sum every combined software interface
 * counter (RX and TX, packets and bytes) under the interface counter
 * lock, add the last vector length per node, and reply with
 * VNET_GET_SUMMARY_STATS_REPLY.  All totals are sent host->net order.
 */
2256 vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp)
2258 stats_main_t *sm = &stats_main;
2259 vnet_interface_main_t *im = sm->interface_main;
2260 vl_api_vnet_get_summary_stats_reply_t *rmp;
2261 vlib_combined_counter_main_t *cm;
2264 u64 total_pkts[VLIB_N_RX_TX];
2265 u64 total_bytes[VLIB_N_RX_TX];
2267 unix_shared_memory_queue_t *q =
2268 vl_api_client_index_to_input_queue (mp->client_index);
2275 rmp = vl_msg_api_alloc (sizeof (*rmp));
2276 rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2277 rmp->context = mp->context;
2280 memset (total_pkts, 0, sizeof (total_pkts));
2281 memset (total_bytes, 0, sizeof (total_bytes));
/* lock out counter updates while we take a consistent snapshot */
2283 vnet_interface_counter_lock (im);
2285 vec_foreach (cm, im->combined_sw_if_counters)
/* 'which' distinguishes the RX vs TX counter main */
2287 which = cm - im->combined_sw_if_counters;
2289 for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2291 vlib_get_combined_counter (cm, i, &v);
2292 total_pkts[which] += v.packets;
2293 total_bytes[which] += v.bytes;
2296 vnet_interface_counter_unlock (im);
2298 rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2299 rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2300 rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2301 rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
2303 clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2305 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * Memory-client delete hook.  The per-client registration cleanup is
 * entirely commented out below, so as written this callback does
 * nothing -- presumably registrations are reaped lazily by the
 * fan-out handlers when a send fails (NOTE(review): confirm).
 */
2309 stats_memclnt_delete_callback (u32 client_index)
2311 vpe_client_stats_registration_t *rp;
2312 stats_main_t *sm = &stats_main;
2316 /* p = hash_get (sm->stats_registration_hash, client_index); */
2319 /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2320 /* pool_put (sm->stats_registrations, rp); */
2321 /* hash_unset (sm->stats_registration_hash, client_index); */
/*
 * The counter messages carry pre-byte-swapped payloads and are handled
 * entirely by the custom handlers above, so the auto-generated endian
 * and print hooks registered in stats_init are stubbed out here.
 */
2327 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
2328 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
2329 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
2330 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
2331 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
2332 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
2333 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
2334 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
2335 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
2336 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
2337 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
2338 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/*
 * Plugin init: wire up main pointers, allocate the cache-line-aligned
 * data-structure lock, register every message handler from
 * foreach_stats_msg, mark counter messages as "bounce" (so the API
 * infra does not free them -- the fan-out handlers own them), build
 * the message-id table, and zero-initialise the per-stat registration
 * pools and hashes from stats.reg.
 */
2340 static clib_error_t *
2341 stats_init (vlib_main_t * vm)
2343 stats_main_t *sm = &stats_main;
2344 api_main_t *am = &api_main;
2345 void *vlib_worker_thread_bootstrap_fn (void *arg);
2348 sm->vnet_main = vnet_get_main ();
2349 sm->interface_main = &vnet_get_main ()->interface_main;
2351 sm->stats_poll_interval_in_seconds = 10;
2352 sm->data_structure_lock =
2353 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
2354 CLIB_CACHE_LINE_BYTES);
2355 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* register one handler per message in foreach_stats_msg */
2358 vl_msg_api_set_handlers(VL_API_##N, #n, \
2359 vl_api_##n##_t_handler, \
2361 vl_api_##n##_t_endian, \
2362 vl_api_##n##_t_print, \
2363 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
2367 /* tell the msg infra not to free these messages... */
2368 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
2369 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
2370 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
2371 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
2372 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
2373 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
2376 * Set up the (msg_name, crc, message-id) table
2378 setup_message_id_table (am);
2380 vec_validate (sm->stats_registrations, STATS_REG_N_IDX);
2381 vec_validate (sm->stats_registration_hash, STATS_REG_N_IDX);
2382 #define stats_reg(n) \
2383 sm->stats_registrations[IDX_##n] = 0; \
2384 sm->stats_registration_hash[IDX_##n] = 0;
2385 #include <vpp/stats/stats.reg>
/*
 * Hook stats_init into vlib startup and register the dedicated stats
 * pthread (no per-thread data-structure clone needed -- it reads the
 * main thread's structures under the dslock).  Closing of the
 * registration block is elided in this fragment.
 */
2391 VLIB_INIT_FUNCTION (stats_init);
2394 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
2396 .function = stats_thread_fn,
2399 .no_data_structure_clone = 1,
2405 * fd.io coding-style-patch-verification: ON
2408 * eval: (c-set-style "gnu")