New upstream version 18.11-rc1
[deb_dpdk.git] / lib / librte_latencystats / rte_latencystats.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <unistd.h>
6 #include <sys/types.h>
7 #include <stdbool.h>
8 #include <math.h>
9
10 #include <rte_mbuf.h>
11 #include <rte_log.h>
12 #include <rte_cycles.h>
13 #include <rte_ethdev.h>
14 #include <rte_metrics.h>
15 #include <rte_memzone.h>
16 #include <rte_lcore.h>
17
18 #include "rte_latencystats.h"
19
/** Nanoseconds per second */
#define NS_PER_SEC 1E9
22
/**
 * Clock cycles per nanosecond, derived from the timer (TSC) frequency.
 *
 * NOTE(review): the double division is truncated into a uint64_t, so a
 * 2.5 GHz timer yields 2, and any timer below 1 GHz yields 0 — callers
 * divide by this value, which would then be a division by zero.
 * Presumably the supported platforms always run a >= 1 GHz TSC; confirm.
 */
static uint64_t
latencystat_cycles_per_ns(void)
{
	return rte_get_timer_hz() / NS_PER_SEC;
}
29
30 /* Macros for printing using RTE_LOG */
31 #define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1
32
33 static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
34 static int latency_stats_index;
35 static uint64_t samp_intvl;
36 static uint64_t timer_tsc;
37 static uint64_t prev_tsc;
38
/*
 * Aggregated latency figures, resident in a shared memzone so that
 * secondary processes can read them.
 * NOTE(review): the Tx callback stores raw TSC-cycle deltas in these
 * fields; they are only converted to nanoseconds when read out via the
 * update/fill paths — the stored unit is cycles, not ns. Confirm before
 * relying on the per-field comments below.
 */
struct rte_latency_stats {
	float min_latency; /**< Minimum latency in nano seconds */
	float avg_latency; /**< Average latency in nano seconds */
	float max_latency; /**< Maximum latency in nano seconds */
	float jitter; /**< Latency variation */
};
45
46 static struct rte_latency_stats *glob_stats;
47
48 struct rxtx_cbs {
49         const struct rte_eth_rxtx_callback *cb;
50 };
51
52 static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
53 static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
54
/* Maps an exported metric name to the byte offset of the matching
 * field inside struct rte_latency_stats. */
struct latency_stats_nameoff {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* Table of every metric this library registers with librte_metrics;
 * order defines the metric indices used throughout this file. */
static const struct latency_stats_nameoff lat_stats_strings[] = {
	{"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
	{"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
	{"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
	{"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
};

/* Number of entries in lat_stats_strings. */
#define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
				sizeof(lat_stats_strings[0]))
69
70 int32_t
71 rte_latencystats_update(void)
72 {
73         unsigned int i;
74         float *stats_ptr = NULL;
75         uint64_t values[NUM_LATENCY_STATS] = {0};
76         int ret;
77
78         for (i = 0; i < NUM_LATENCY_STATS; i++) {
79                 stats_ptr = RTE_PTR_ADD(glob_stats,
80                                 lat_stats_strings[i].offset);
81                 values[i] = (uint64_t)floor((*stats_ptr)/
82                                 latencystat_cycles_per_ns());
83         }
84
85         ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
86                                         latency_stats_index,
87                                         values, NUM_LATENCY_STATS);
88         if (ret < 0)
89                 RTE_LOG(INFO, LATENCY_STATS, "Failed to push the stats\n");
90
91         return ret;
92 }
93
94 static void
95 rte_latencystats_fill_values(struct rte_metric_value *values)
96 {
97         unsigned int i;
98         float *stats_ptr = NULL;
99
100         for (i = 0; i < NUM_LATENCY_STATS; i++) {
101                 stats_ptr = RTE_PTR_ADD(glob_stats,
102                                 lat_stats_strings[i].offset);
103                 values[i].key = i;
104                 values[i].value = (uint64_t)floor((*stats_ptr)/
105                                                 latencystat_cycles_per_ns());
106         }
107 }
108
/**
 * Rx callback: time-stamp at most one packet per sample interval.
 *
 * Installed as the first Rx callback on every queue. TSC cycles elapsed
 * since the previous invocation are accumulated into the global
 * timer_tsc; once it reaches samp_intvl, the next packet that does not
 * already carry a timestamp (PKT_RX_TIMESTAMP clear) is stamped with
 * the current TSC value and the accumulator resets.
 *
 * NOTE(review): prev_tsc and timer_tsc are plain globals shared across
 * all queues/lcores with no synchronization — presumably tolerated
 * because this is a sampling heuristic; confirm.
 *
 * @return nb_pkts unchanged (no packets are dropped).
 */
static uint16_t
add_time_stamps(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_cb __rte_unused)
{
	unsigned int i;
	uint64_t diff_tsc, now;

	/*
	 * For every sample interval,
	 * time stamp is marked on one received packet.
	 */
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		diff_tsc = now - prev_tsc;
		timer_tsc += diff_tsc;

		/* Skip packets already stamped (e.g. by the NIC). */
		if ((pkts[i]->ol_flags & PKT_RX_TIMESTAMP) == 0
				&& (timer_tsc >= samp_intvl)) {
			pkts[i]->timestamp = now;
			pkts[i]->ol_flags |= PKT_RX_TIMESTAMP;
			timer_tsc = 0;
		}
		prev_tsc = now;
		now = rte_rdtsc();
	}

	return nb_pkts;
}
141
/**
 * Tx callback: measure latency of packets stamped on the Rx side.
 *
 * For every packet carrying PKT_RX_TIMESTAMP, the latency is computed
 * as the TSC delta between now and the stored timestamp (unit: cycles;
 * conversion to ns happens when the stats are read out). From those
 * samples it maintains min/max, an EWMA average, and an RFC 5481-style
 * jitter estimate in the shared glob_stats structure.
 *
 * NOTE(review): prev_latency is a function-local static shared by every
 * queue and lcore, so interleaved invocations mix samples from
 * different flows into the jitter sequence — confirm this is an
 * accepted approximation. Also note latency[] is a VLA sized by the
 * caller-controlled nb_pkts.
 *
 * @return nb_pkts unchanged (no packets are dropped).
 */
static uint16_t
calc_latency(uint16_t pid __rte_unused,
		uint16_t qid __rte_unused,
		struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *_ __rte_unused)
{
	unsigned int i, cnt = 0;
	uint64_t now;
	float latency[nb_pkts];
	static float prev_latency;
	/*
	 * Alpha represents degree of weighting decrease in EWMA,
	 * a constant smoothing factor between 0 and 1. The value
	 * is used below for measuring average latency.
	 */
	const float alpha = 0.2;

	/* Collect one latency sample per stamped packet. */
	now = rte_rdtsc();
	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_RX_TIMESTAMP)
			latency[cnt++] = now - pkts[i]->timestamp;
	}

	for (i = 0; i < cnt; i++) {
		/*
		 * The jitter is calculated as statistical mean of interpacket
		 * delay variation. The "jitter estimate" is computed by taking
		 * the absolute values of the ipdv sequence and applying an
		 * exponential filter with parameter 1/16 to generate the
		 * estimate. i.e J=J+(|D(i-1,i)|-J)/16. Where J is jitter,
		 * D(i-1,i) is difference in latency of two consecutive packets
		 * i-1 and i.
		 * Reference: Calculated as per RFC 5481, sec 4.1,
		 * RFC 3393 sec 4.5, RFC 1889 sec.
		 */
		glob_stats->jitter +=  (fabsf(prev_latency - latency[i])
					- glob_stats->jitter)/16;
		/* min_latency == 0 doubles as the "no sample yet" marker. */
		if (glob_stats->min_latency == 0)
			glob_stats->min_latency = latency[i];
		else if (latency[i] < glob_stats->min_latency)
			glob_stats->min_latency = latency[i];
		else if (latency[i] > glob_stats->max_latency)
			glob_stats->max_latency = latency[i];
		/*
		 * The average latency is measured using exponential moving
		 * average, i.e. using EWMA
		 * https://en.wikipedia.org/wiki/Moving_average
		 */
		glob_stats->avg_latency +=
			alpha * (latency[i] - glob_stats->avg_latency);
		prev_latency = latency[i];
	}

	return nb_pkts;
}
198
199 int
200 rte_latencystats_init(uint64_t app_samp_intvl,
201                 rte_latency_stats_flow_type_fn user_cb)
202 {
203         unsigned int i;
204         uint16_t pid;
205         uint16_t qid;
206         struct rxtx_cbs *cbs = NULL;
207         const char *ptr_strings[NUM_LATENCY_STATS] = {0};
208         const struct rte_memzone *mz = NULL;
209         const unsigned int flags = 0;
210
211         if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
212                 return -EEXIST;
213
214         /** Allocate stats in shared memory fo multi process support */
215         mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
216                                         rte_socket_id(), flags);
217         if (mz == NULL) {
218                 RTE_LOG(ERR, LATENCY_STATS, "Cannot reserve memory: %s:%d\n",
219                         __func__, __LINE__);
220                 return -ENOMEM;
221         }
222
223         glob_stats = mz->addr;
224         samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();
225
226         /** Register latency stats with stats library */
227         for (i = 0; i < NUM_LATENCY_STATS; i++)
228                 ptr_strings[i] = lat_stats_strings[i].name;
229
230         latency_stats_index = rte_metrics_reg_names(ptr_strings,
231                                                         NUM_LATENCY_STATS);
232         if (latency_stats_index < 0) {
233                 RTE_LOG(DEBUG, LATENCY_STATS,
234                         "Failed to register latency stats names\n");
235                 return -1;
236         }
237
238         /** Register Rx/Tx callbacks */
239         RTE_ETH_FOREACH_DEV(pid) {
240                 struct rte_eth_dev_info dev_info;
241                 rte_eth_dev_info_get(pid, &dev_info);
242                 for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
243                         cbs = &rx_cbs[pid][qid];
244                         cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
245                                         add_time_stamps, user_cb);
246                         if (!cbs->cb)
247                                 RTE_LOG(INFO, LATENCY_STATS, "Failed to "
248                                         "register Rx callback for pid=%d, "
249                                         "qid=%d\n", pid, qid);
250                 }
251                 for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
252                         cbs = &tx_cbs[pid][qid];
253                         cbs->cb =  rte_eth_add_tx_callback(pid, qid,
254                                         calc_latency, user_cb);
255                         if (!cbs->cb)
256                                 RTE_LOG(INFO, LATENCY_STATS, "Failed to "
257                                         "register Tx callback for pid=%d, "
258                                         "qid=%d\n", pid, qid);
259                 }
260         }
261         return 0;
262 }
263
264 int
265 rte_latencystats_uninit(void)
266 {
267         uint16_t pid;
268         uint16_t qid;
269         int ret = 0;
270         struct rxtx_cbs *cbs = NULL;
271         const struct rte_memzone *mz = NULL;
272
273         /** De register Rx/Tx callbacks */
274         RTE_ETH_FOREACH_DEV(pid) {
275                 struct rte_eth_dev_info dev_info;
276                 rte_eth_dev_info_get(pid, &dev_info);
277                 for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
278                         cbs = &rx_cbs[pid][qid];
279                         ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
280                         if (ret)
281                                 RTE_LOG(INFO, LATENCY_STATS, "failed to "
282                                         "remove Rx callback for pid=%d, "
283                                         "qid=%d\n", pid, qid);
284                 }
285                 for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
286                         cbs = &tx_cbs[pid][qid];
287                         ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
288                         if (ret)
289                                 RTE_LOG(INFO, LATENCY_STATS, "failed to "
290                                         "remove Tx callback for pid=%d, "
291                                         "qid=%d\n", pid, qid);
292                 }
293         }
294
295         /* free up the memzone */
296         mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
297         if (mz)
298                 rte_memzone_free(mz);
299
300         return 0;
301 }
302
303 int
304 rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
305 {
306         unsigned int i;
307
308         if (names == NULL || size < NUM_LATENCY_STATS)
309                 return NUM_LATENCY_STATS;
310
311         for (i = 0; i < NUM_LATENCY_STATS; i++)
312                 snprintf(names[i].name, sizeof(names[i].name),
313                                 "%s", lat_stats_strings[i].name);
314
315         return NUM_LATENCY_STATS;
316 }
317
318 int
319 rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
320 {
321         if (size < NUM_LATENCY_STATS || values == NULL)
322                 return NUM_LATENCY_STATS;
323
324         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
325                 const struct rte_memzone *mz;
326                 mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
327                 if (mz == NULL) {
328                         RTE_LOG(ERR, LATENCY_STATS,
329                                 "Latency stats memzone not found\n");
330                         return -ENOMEM;
331                 }
332                 glob_stats =  mz->addr;
333         }
334
335         /* Retrieve latency stats */
336         rte_latencystats_fill_values(values);
337
338         return NUM_LATENCY_STATS;
339 }