/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <math.h>

#include <rte_mbuf.h>
#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_metrics.h>
#include <rte_memzone.h>
#include <rte_lcore.h>

#include "rte_latencystats.h"
/** Nano seconds per second */
#define NS_PER_SEC 1E9

/** Clock cycles per nano second */
static uint64_t
latencystat_cycles_per_ns(void)
{
	return rte_get_timer_hz() / NS_PER_SEC;
}
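/*
 * Worked example (assumed clock rate): on a 2 GHz TSC, rte_get_timer_hz()
 * returns 2000000000 and this function yields 2 cycles per nanosecond;
 * dividing a cycle count by it gives nanoseconds, and multiplying a
 * nanosecond interval by it gives cycles.
 */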
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1
static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
static int latency_stats_index;
static uint64_t samp_intvl;
static uint64_t timer_tsc;
static uint64_t prev_tsc;
struct rte_latency_stats {
	float min_latency; /**< Minimum latency in nano seconds */
	float avg_latency; /**< Average latency in nano seconds */
	float max_latency; /**< Maximum latency in nano seconds */
	float jitter; /**< Latency variation */
};

static struct rte_latency_stats *glob_stats;
struct rxtx_cbs {
	struct rte_eth_rxtx_callback *cb;
};

static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
struct latency_stats_nameoff {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct latency_stats_nameoff lat_stats_strings[] = {
	{"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
	{"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
	{"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
	{"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
};

#define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
		sizeof(lat_stats_strings[0]))
int32_t
rte_latencystats_update(void)
{
	unsigned int i;
	float *stats_ptr = NULL;
	uint64_t values[NUM_LATENCY_STATS] = {0};
	int ret;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i] = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}

	ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
					latency_stats_index,
					values, NUM_LATENCY_STATS);
	if (ret < 0)
		RTE_LOG(INFO, LATENCY_STATS, "Failed to push the stats\n");
	return ret;
}
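/*
 * Usage sketch (illustrative only, not part of this file): an application
 * typically calls rte_latencystats_update() periodically, e.g. from its
 * main loop, so the metrics library holds reasonably fresh values:
 *
 *	if (rte_latencystats_update() < 0)
 *		printf("failed to update latency metrics\n");
 */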
static void
rte_latencystats_fill_values(struct rte_metric_value *values)
{
	unsigned int i;
	float *stats_ptr = NULL;

	for (i = 0; i < NUM_LATENCY_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(glob_stats,
				lat_stats_strings[i].offset);
		values[i].key = i;
		values[i].value = (uint64_t)floor((*stats_ptr)/
				latencystat_cycles_per_ns());
	}
}
static uint16_t
add_time_stamps(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_cb __rte_unused)
{
	unsigned int i;
	uint64_t diff_tsc, now;

	/*
	 * For every sample interval,
	 * a time stamp is marked on one received packet.
	 */
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		diff_tsc = now - prev_tsc;
		timer_tsc += diff_tsc;
		if ((pkts[i]->ol_flags & PKT_RX_TIMESTAMP) == 0
				&& (timer_tsc >= samp_intvl)) {
			pkts[i]->timestamp = now;
			pkts[i]->ol_flags |= PKT_RX_TIMESTAMP;
			timer_tsc = 0;
		}
		prev_tsc = now;
		now = rte_rdtsc();
	}

	return nb_pkts;
}
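/*
 * Illustrative example (assumed values): if rte_latencystats_init() was
 * called with app_samp_intvl = 1000000 ns (1 ms) on a 2 GHz TSC,
 * samp_intvl is 2000000 cycles, so roughly one received packet per
 * millisecond is stamped here and later measured on the Tx path.
 */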
static uint16_t
calc_latency(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *_ __rte_unused)
{
	unsigned int i, cnt = 0;
	uint64_t now;
	float latency[nb_pkts];
	static float prev_latency;
	/*
	 * Alpha represents the degree of weighting decrease in EWMA,
	 * a constant smoothing factor between 0 and 1. The value
	 * is used below for measuring the average latency.
	 */
	const float alpha = 0.2;

	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_RX_TIMESTAMP)
			latency[cnt++] = now - pkts[i]->timestamp;
	}

	for (i = 0; i < cnt; i++) {
		/*
		 * The jitter is calculated as the statistical mean of
		 * interpacket delay variation. The "jitter estimate" is
		 * computed by taking the absolute values of the ipdv sequence
		 * and applying an exponential filter with parameter 1/16 to
		 * generate the estimate, i.e. J = J + (|D(i-1,i)| - J)/16,
		 * where J is the jitter and D(i-1,i) is the difference in
		 * latency of two consecutive packets i-1 and i.
		 * Reference: RFC 5481 sec 4.1, RFC 3393 sec 4.5, RFC 1889.
		 */
		glob_stats->jitter += (fabsf(prev_latency - latency[i])
					- glob_stats->jitter)/16;
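		/*
		 * Worked example (assumed values, same time units as the
		 * stored samples): with J = 10, a previous latency of 100 and
		 * a current latency of 126, |D| = 26 and the new estimate is
		 * J = 10 + (26 - 10)/16 = 11.
		 */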
		if (glob_stats->min_latency == 0)
			glob_stats->min_latency = latency[i];
		else if (latency[i] < glob_stats->min_latency)
			glob_stats->min_latency = latency[i];
		else if (latency[i] > glob_stats->max_latency)
			glob_stats->max_latency = latency[i];
		/*
		 * The average latency is measured using an exponential moving
		 * average, i.e. using EWMA
		 * https://en.wikipedia.org/wiki/Moving_average
		 */
		glob_stats->avg_latency +=
			alpha * (latency[i] - glob_stats->avg_latency);
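		/*
		 * Worked example (assumed values, same time units as the
		 * stored samples): with alpha = 0.2, a running average of 100
		 * and a new sample of 150, the average becomes
		 * 100 + 0.2 * (150 - 100) = 110.
		 */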
		prev_latency = latency[i];
	}

	return nb_pkts;
}
int
rte_latencystats_init(uint64_t app_samp_intvl,
		rte_latency_stats_flow_type_fn user_cb)
{
	unsigned int i;
	uint16_t pid;
	uint16_t qid;
	struct rxtx_cbs *cbs = NULL;
	const uint16_t nb_ports = rte_eth_dev_count();
	const char *ptr_strings[NUM_LATENCY_STATS] = {0};
	const struct rte_memzone *mz = NULL;
	const unsigned int flags = 0;

	if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
		return -EEXIST;

	/** Allocate stats in shared memory for multi-process support */
	mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
					rte_socket_id(), flags);
	if (mz == NULL) {
		RTE_LOG(ERR, LATENCY_STATS, "Cannot reserve memory: %s:%d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	glob_stats = mz->addr;
	samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();

	/** Register latency stats with stats library */
	for (i = 0; i < NUM_LATENCY_STATS; i++)
		ptr_strings[i] = lat_stats_strings[i].name;

	latency_stats_index = rte_metrics_reg_names(ptr_strings,
						NUM_LATENCY_STATS);
	if (latency_stats_index < 0) {
		RTE_LOG(DEBUG, LATENCY_STATS,
			"Failed to register latency stats names\n");
		return -1;
	}

	/** Register Rx/Tx callbacks */
	for (pid = 0; pid < nb_ports; pid++) {
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(pid, &dev_info);
		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
					add_time_stamps, user_cb);
			if (!cbs->cb)
				RTE_LOG(INFO, LATENCY_STATS,
					"Failed to register Rx callback for pid=%d, qid=%d\n",
					pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			cbs->cb = rte_eth_add_tx_callback(pid, qid,
					calc_latency, user_cb);
			if (!cbs->cb)
				RTE_LOG(INFO, LATENCY_STATS,
					"Failed to register Tx callback for pid=%d, qid=%d\n",
					pid, qid);
		}
	}

	return 0;
}
int
rte_latencystats_uninit(void)
{
	uint16_t pid;
	uint16_t qid;
	int ret = 0;
	struct rxtx_cbs *cbs = NULL;
	const uint16_t nb_ports = rte_eth_dev_count();

	/** Deregister the Rx/Tx callbacks */
	for (pid = 0; pid < nb_ports; pid++) {
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(pid, &dev_info);
		for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
			cbs = &rx_cbs[pid][qid];
			ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
			if (ret)
				RTE_LOG(INFO, LATENCY_STATS,
					"failed to remove Rx callback for pid=%d, qid=%d\n",
					pid, qid);
		}
		for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
			cbs = &tx_cbs[pid][qid];
			ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
			if (ret)
				RTE_LOG(INFO, LATENCY_STATS,
					"failed to remove Tx callback for pid=%d, qid=%d\n",
					pid, qid);
		}
	}

	return 0;
}
int
rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
{
	unsigned int i;

	if (names == NULL || size < NUM_LATENCY_STATS)
		return NUM_LATENCY_STATS;

	for (i = 0; i < NUM_LATENCY_STATS; i++)
		snprintf(names[i].name, sizeof(names[i].name),
			"%s", lat_stats_strings[i].name);

	return NUM_LATENCY_STATS;
}
int
rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
{
	if (size < NUM_LATENCY_STATS || values == NULL)
		return NUM_LATENCY_STATS;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		const struct rte_memzone *mz;

		mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
		if (mz == NULL) {
			RTE_LOG(ERR, LATENCY_STATS,
				"Latency stats memzone not found\n");
			return -ENOMEM;
		}
		glob_stats = mz->addr;
	}

	/* Retrieve latency stats */
	rte_latencystats_fill_values(values);

	return NUM_LATENCY_STATS;
}
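/*
 * Usage sketch (illustrative only, not part of this file): query the stat
 * count first, then fetch names and values into suitably sized arrays:
 *
 *	int n = rte_latencystats_get_names(NULL, 0);
 *	struct rte_metric_name names[n];
 *	struct rte_metric_value values[n];
 *	rte_latencystats_get_names(names, n);
 *	rte_latencystats_get(values, n);
 */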