4273d9d2f810a6a075cc9d5f9fe125c07a43dbf0
[csit.git] / resources / tools / dash / app / pal / trending / graphs.py
1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
"""Implementation of the trending graphs (throughput trend with anomaly
markers and HDRH latency percentile plots) for the trending dashboard.
"""
16
17 import plotly.graph_objects as go
18 import pandas as pd
19 import re
20
21 import hdrh.histogram
22 import hdrh.codec
23
24 from datetime import datetime
25 from numpy import isnan
26
27 from ..jumpavg import classify
28
29
# Color palette cycled through to distinguish individual tests in the graphs.
_COLORS = (
    u"#1A1110", u"#DA2647", u"#214FC6", u"#01786F", u"#BD8260", u"#FFD12A",
    u"#A6E7FF", u"#738276", u"#C95A49", u"#FC5A8D", u"#CEC8EF", u"#391285",
    u"#6F2DA8", u"#FF878D", u"#45A27D", u"#FFD0B9", u"#FD5240", u"#DB91EF",
    u"#44D7A8", u"#4F86F7", u"#84DE02", u"#FFCFF1", u"#614051"
)
# Position of each anomaly classification on the 0.0-1.0 marker colorscale.
_ANOMALY_COLOR = {
    u"regression": 0.0,
    u"normal": 0.5,
    u"progression": 1.0
}
# Discrete colorscale for throughput anomalies:
# regression (red), normal (white), progression (green).
_COLORSCALE_TPUT = [
    [0.00, u"red"],
    [0.33, u"red"],
    [0.33, u"white"],
    [0.66, u"white"],
    [0.66, u"green"],
    [1.00, u"green"]
]
_TICK_TEXT_TPUT = [u"Regression", u"Normal", u"Progression"]
# Discrete colorscale for latency anomalies; the meaning is inverted
# relative to throughput (lower latency is better), hence the swapped colors
# and tick texts below.
_COLORSCALE_LAT = [
    [0.00, u"green"],
    [0.33, u"green"],
    [0.33, u"white"],
    [0.66, u"white"],
    [0.66, u"red"],
    [1.00, u"red"]
]
_TICK_TEXT_LAT = [u"Progression", u"Normal", u"Regression"]
# Maps test type to the data frame column holding the measured value.
_VALUE = {
    "mrr": "result_receive_rate_rate_avg",
    "ndr": "result_ndr_lower_rate_value",
    "pdr": "result_pdr_lower_rate_value",
    "pdr-lat": "result_latency_forward_pdr_50_avg"
}
# Maps test type to the data frame column holding the value's unit.
_UNIT = {
    "mrr": "result_receive_rate_rate_unit",
    "ndr": "result_ndr_lower_rate_unit",
    "pdr": "result_pdr_lower_rate_unit",
    "pdr-lat": "result_latency_forward_pdr_50_unit"
}
# Data frame columns with encoded HDRH latency histograms.
_LAT_HDRH = (  # Do not change the order
    "result_latency_forward_pdr_0_hdrh",
    "result_latency_reverse_pdr_0_hdrh",
    "result_latency_forward_pdr_10_hdrh",
    "result_latency_reverse_pdr_10_hdrh",
    "result_latency_forward_pdr_50_hdrh",
    "result_latency_reverse_pdr_50_hdrh",
    "result_latency_forward_pdr_90_hdrh",
    "result_latency_reverse_pdr_90_hdrh",
)
# This value depends on latency stream rate (9001 pps) and duration (5s).
# Keep it slightly higher to ensure rounding errors to not remove tick mark.
PERCENTILE_MAX = 99.999501

# Human readable description (load level) for each HDRH histogram column.
_GRAPH_LAT_HDRH_DESC = {
    u"result_latency_forward_pdr_0_hdrh": u"No-load.",
    u"result_latency_reverse_pdr_0_hdrh": u"No-load.",
    u"result_latency_forward_pdr_10_hdrh": u"Low-load, 10% PDR.",
    u"result_latency_reverse_pdr_10_hdrh": u"Low-load, 10% PDR.",
    u"result_latency_forward_pdr_50_hdrh": u"Mid-load, 50% PDR.",
    u"result_latency_reverse_pdr_50_hdrh": u"Mid-load, 50% PDR.",
    u"result_latency_forward_pdr_90_hdrh": u"High-load, 90% PDR.",
    u"result_latency_reverse_pdr_90_hdrh": u"High-load, 90% PDR."
}
95
96
def _get_hdrh_latencies(row: pd.Series, name: str) -> dict:
    """Collect the encoded HDRH latency histograms from one data frame row.

    :param row: A row of the data frame with the test results.
    :param name: The test name to be displayed in the graph.
    :type row: pandas.Series
    :type name: str
    :returns: Dictionary with the test name and all HDRH histogram strings
        (one item per column in _LAT_HDRH), or None if any of the expected
        HDRH columns is missing from the row.
    :rtype: dict or None
    """

    latencies = {"name": name}
    for key in _LAT_HDRH:
        try:
            latencies[key] = row[key]
        except KeyError:
            # All-or-nothing: a row without the full set of histograms
            # yields no latency data at all.
            return None

    return latencies
109
110
def _classify_anomalies(data):
    """Process the data and return anomalies and trending values.

    Gather data into groups with average as trend value.
    Decorate values within groups to be normal,
    the first value of changed average as a regression, or a progression.

    :param data: Full data set with unavailable samples replaced by nan.
    :type data: OrderedDict
    :returns: Classification and trend values
    :rtype: 3-tuple, list of strings, list of floats and list of floats
    """
    # NaN means something went wrong.
    # Use 0.0 to cause that being reported as a severe regression.
    bare_data = [0.0 if isnan(sample) else sample for sample in data.values()]
    # TODO: Make BitCountingGroupList a subclass of list again?
    group_list = classify(bare_data).group_list
    group_list.reverse()  # Just to use .pop() for FIFO.
    classification = list()
    avgs = list()
    stdevs = list()
    # State for walking the samples against the current group:
    active_group = None  # group whose trend stats currently apply
    values_left = 0  # samples remaining in the active group
    avg = 0.0
    stdv = 0.0
    for sample in data.values():
        if isnan(sample):
            # Unavailable sample: propagate NaN into the trend lists and
            # mark it as an outlier.
            classification.append(u"outlier")
            avgs.append(sample)
            stdevs.append(sample)
            continue
        if values_left < 1 or active_group is None:
            # Start of a new group: its first sample carries the group's
            # comment ("regression" / "progression" / "normal").
            values_left = 0
            while values_left < 1:  # Ignore empty groups (should not happen).
                active_group = group_list.pop()
                values_left = len(active_group.run_list)
            avg = active_group.stats.avg
            stdv = active_group.stats.stdev
            classification.append(active_group.comment)
            avgs.append(avg)
            stdevs.append(stdv)
            values_left -= 1
            continue
        # Remaining samples of the group are "normal" and share the group's
        # average and stdev as trend values.
        classification.append(u"normal")
        avgs.append(avg)
        stdevs.append(stdv)
        values_left -= 1
    return classification, avgs, stdevs
159
160
def select_trending_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
    """Select the data for one trending graph from the provided data frame.

    The selection is driven by the test specification in "itm": physical
    testbed (topology-architecture-nic-driver), test type, frame size,
    number of cores and test name.

    :param data: Data frame with all the trending data.
    :param itm: Specification of the selected test.
    :type data: pandas.DataFrame
    :type itm: dict
    :returns: Data frame with the selected data, sorted by the start time,
        or None if the physical testbed specification is malformed.
    :rtype: pandas.DataFrame or None
    """

    phy = itm["phy"].split("-")
    if len(phy) != 4:
        return None
    topo, arch, nic, drv = phy
    if drv in ("dpdk", "ixgbe"):
        # These drivers are not a part of the test name.
        drv = ""
    else:
        drv = drv.replace("_", "-") + "-"
    # Only mrr tests on non-aws testbeds run daily; everything else weekly.
    cadence = \
        "weekly" if (arch == "aws" or itm["testtype"] != "mrr") else "daily"
    sel_topo_arch = (
        f"csit-vpp-perf-"
        f"{itm['testtype'] if itm['testtype'] == 'mrr' else 'ndrpdr'}-"
        f"{cadence}-master-{topo}-{arch}"
    )
    df_sel = data.loc[(data["job"] == sel_topo_arch)]
    # Raw f-string so that "\." is a regex escape, not a (deprecated)
    # string-literal escape sequence.
    regex = re.compile(
        rf"^.*{nic}.*\.{itm['framesize']}-{itm['core']}-{drv}{itm['test']}-"
        rf"{'mrr' if itm['testtype'] == 'mrr' else 'ndrpdr'}$"
    )
    df = df_sel.loc[
        df_sel["test_id"].apply(lambda x: bool(regex.search(x)))
    ].sort_values(by="start_time", ignore_index=True)

    return df
194
195
def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
    start: datetime, end: datetime, color: str) -> list:
    """Generate the trending traces (samples, trend line and anomalies) for
    one test.

    :param ttype: Test type ("mrr", "ndr", "pdr" or "pdr-lat").
    :param name: The test name displayed in the legend and in hover texts.
    :param df: Data frame with the test's trending data.
    :param start: Only samples taken at or after this time are plotted.
    :param end: Only samples taken at or before this time are plotted.
    :param color: Color of the traces.
    :type ttype: str
    :type name: str
    :type df: pandas.DataFrame
    :type start: datetime.datetime
    :type end: datetime.datetime
    :type color: str
    :returns: List of traces (samples, trend line and, if any anomaly was
        detected, anomaly markers), or an empty list if there is no data.
    :rtype: list
    """

    df = df.dropna(subset=[_VALUE[ttype], ])
    if df.empty:
        return list()
    # Restrict the whole data frame (not only the x axis) to the selected
    # time interval, so hover texts, custom data and anomaly indices stay
    # aligned with the plotted points. Filtering only the x axis (as the
    # original code did) mis-pairs dates with values whenever a sample
    # falls outside the interval.
    df = df.loc[(df["start_time"] >= start) & (df["start_time"] <= end)]
    if df.empty:
        return list()

    x_axis = df["start_time"].tolist()

    anomalies, trend_avg, trend_stdev = _classify_anomalies(
        {k: v for k, v in zip(x_axis, df[_VALUE[ttype]])}
    )

    # Hover text (and, for latency, HDRH custom data) for each sample.
    hover = list()
    customdata = list()
    for _, row in df.iterrows():
        hover_itm = (
            f"date: {row['start_time'].strftime('%d-%m-%Y %H:%M:%S')}<br>"
            f"<prop> [{row[_UNIT[ttype]]}]: {row[_VALUE[ttype]]}<br>"
            f"<stdev>"
            f"{row['dut_type']}-ref: {row['dut_version']}<br>"
            f"csit-ref: {row['job']}/{row['build']}<br>"
            f"hosts: {', '.join(row['hosts'])}"
        )
        if ttype == "mrr":
            stdev = (
                f"stdev [{row['result_receive_rate_rate_unit']}]: "
                f"{row['result_receive_rate_rate_stdev']}<br>"
            )
        else:
            stdev = ""
        hover_itm = hover_itm.replace(
            "<prop>", "latency" if ttype == "pdr-lat" else "average"
        ).replace("<stdev>", stdev)
        hover.append(hover_itm)
        if ttype == "pdr-lat":
            customdata.append(_get_hdrh_latencies(row, name))

    # Hover text for each point of the trend line.
    hover_trend = list()
    for avg, stdev, (_, row) in zip(trend_avg, trend_stdev, df.iterrows()):
        hover_itm = (
            f"date: {row['start_time'].strftime('%d-%m-%Y %H:%M:%S')}<br>"
            f"trend [pps]: {avg}<br>"
            f"stdev [pps]: {stdev}<br>"
            f"{row['dut_type']}-ref: {row['dut_version']}<br>"
            f"csit-ref: {row['job']}/{row['build']}<br>"
            f"hosts: {', '.join(row['hosts'])}"
        )
        if ttype == "pdr-lat":
            hover_itm = hover_itm.replace("[pps]", "[us]")
        hover_trend.append(hover_itm)

    traces = [
        go.Scatter(  # Samples
            x=x_axis,
            y=df[_VALUE[ttype]],
            name=name,
            mode="markers",
            marker={
                u"size": 5,
                u"color": color,
                u"symbol": u"circle",
            },
            text=hover,
            hoverinfo=u"text+name",
            showlegend=True,
            legendgroup=name,
            customdata=customdata
        ),
        go.Scatter(  # Trend line
            x=x_axis,
            y=trend_avg,
            name=name,
            mode="lines",
            line={
                u"shape": u"linear",
                u"width": 1,
                u"color": color,
            },
            text=hover_trend,
            hoverinfo=u"text+name",
            showlegend=False,
            legendgroup=name,
        )
    ]

    if anomalies:
        anomaly_x = list()
        anomaly_y = list()
        anomaly_color = list()
        for idx, anomaly in enumerate(anomalies):
            if anomaly in (u"regression", u"progression"):
                anomaly_x.append(x_axis[idx])
                anomaly_y.append(trend_avg[idx])
                anomaly_color.append(_ANOMALY_COLOR[anomaly])
        # Make sure the whole colorscale is always present so the colorbar
        # legend is rendered consistently.
        anomaly_color.extend([0.0, 0.5, 1.0])
        traces.append(
            go.Scatter(
                x=anomaly_x,
                y=anomaly_y,
                mode=u"markers",
                hoverinfo=u"none",
                showlegend=False,
                legendgroup=name,
                name=f"{name}-anomalies",
                marker={
                    u"size": 15,
                    u"symbol": u"circle-open",
                    u"color": anomaly_color,
                    u"colorscale": _COLORSCALE_LAT \
                        if ttype == "pdr-lat" else _COLORSCALE_TPUT,
                    u"showscale": True,
                    u"line": {
                        u"width": 2
                    },
                    u"colorbar": {
                        u"y": 0.5,
                        u"len": 0.8,
                        u"title": u"Circles Marking Data Classification",
                        u"titleside": u"right",
                        u"tickmode": u"array",
                        u"tickvals": [0.167, 0.500, 0.833],
                        u"ticktext": _TICK_TEXT_LAT \
                            if ttype == "pdr-lat" else _TICK_TEXT_TPUT,
                        u"ticks": u"",
                        u"ticklen": 0,
                        u"tickangle": -90,
                        u"thickness": 10
                    }
                }
            )
        )

    return traces
335
336
def graph_trending(data: pd.DataFrame, sel:dict, layout: dict,
    start: datetime, end: datetime) -> tuple:
    """Generate the throughput and (for pdr tests) latency trending graphs
    for all selected tests.

    :param data: Data frame with all the trending data.
    :param sel: List of selected test specifications.
    :param layout: Layout of the graphs; "plot-trending-tput" and
        "plot-trending-lat" items are used.
    :param start: Only samples taken at or after this time are plotted.
    :param end: Only samples taken at or before this time are plotted.
    :type data: pandas.DataFrame
    :type sel: list
    :type layout: dict
    :type start: datetime.datetime
    :type end: datetime.datetime
    :returns: Throughput graph and latency graph; each may be None if there
        is nothing to display.
    :rtype: tuple(plotly.graph_objects.Figure, plotly.graph_objects.Figure)
    """

    if not sel:
        return None, None

    fig_tput = None
    fig_lat = None
    for c_idx, itm in enumerate(sel):

        df = select_trending_data(data, itm)
        if df is None:
            continue

        name = "-".join((
            itm["phy"], itm["framesize"], itm["core"], itm["test"],
            itm["testtype"]
        ))
        color = _COLORS[c_idx % len(_COLORS)]

        traces = _generate_trending_traces(
            itm["testtype"], name, df, start, end, color
        )
        if traces:
            # Create the throughput figure lazily, on the first trace.
            if not fig_tput:
                fig_tput = go.Figure()
            fig_tput.add_traces(traces)

        # Only pdr tests carry latency data.
        if itm["testtype"] == "pdr":
            traces = _generate_trending_traces(
                "pdr-lat", name, df, start, end, color
            )
            if traces:
                if not fig_lat:
                    fig_lat = go.Figure()
                fig_lat.add_traces(traces)

    if fig_tput:
        fig_tput.update_layout(layout.get("plot-trending-tput", dict()))
    if fig_lat:
        fig_lat.update_layout(layout.get("plot-trending-lat", dict()))

    return fig_tput, fig_lat
381
382
def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
    """Generate the HDRH latency percentile graph.

    :param data: Mapping from an HDRH column name (see _LAT_HDRH) to the
        encoded HDRH histogram string. Even/odd position of the item decides
        the displayed direction (W-E / E-W).
    :param layout: Layout of the graph; the "plot-hdrh-latency" item is used.
    :type data: dict
    :type layout: dict
    :returns: HDRH latency graph, or None if no histogram could be decoded.
    :rtype: plotly.graph_objects.Figure or None
    """

    fig = None

    traces = list()
    for idx, (lat_name, lat_hdrh) in enumerate(data.items()):
        try:
            decoded = hdrh.histogram.HdrHistogram.decode(lat_hdrh)
        except (hdrh.codec.HdrLengthException, TypeError):
            # Missing or invalid histogram; skip this direction/load.
            continue
        previous_x = 0.0
        prev_perc = 0.0
        xaxis = list()
        yaxis = list()
        hovertext = list()
        for item in decoded.get_recorded_iterator():
            # The real value is "percentile".
            # For 100%, we cut that down to "x_perc" to avoid
            # infinity.
            percentile = item.percentile_level_iterated_to
            x_perc = min(percentile, PERCENTILE_MAX)
            hover_itm = (
                f"<b>{_GRAPH_LAT_HDRH_DESC[lat_name]}</b><br>"
                f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
                f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
                f"Latency: {item.value_iterated_to}uSec"
            )
            # Each histogram bin is drawn as a horizontal step: two points
            # with the same latency (y) at the bin's start and end x.
            next_x = 100.0 / (100.0 - x_perc)
            xaxis.extend((previous_x, next_x))
            yaxis.extend((item.value_iterated_to, item.value_iterated_to))
            hovertext.extend((hover_itm, hover_itm))
            previous_x = next_x
            prev_perc = percentile

        traces.append(
            go.Scatter(
                x=xaxis,
                y=yaxis,
                name=_GRAPH_LAT_HDRH_DESC[lat_name],
                mode=u"lines",
                legendgroup=_GRAPH_LAT_HDRH_DESC[lat_name],
                # Show one legend entry per forward/reverse pair.
                showlegend=bool(idx % 2),
                line=dict(
                    color=_COLORS[int(idx/2)],
                    dash=u"solid",
                    width=1 if idx % 2 else 2
                ),
                hovertext=hovertext,
                hoverinfo=u"text"
            )
        )
    if traces:
        fig = go.Figure()
        fig.add_traces(traces)
        layout_hdrh = layout.get("plot-hdrh-latency", None)
        # Bug fix: apply the layout whenever it is provided. The original
        # tested "lat_hdrh" (the leftover loop variable holding the last
        # histogram blob) instead of the fetched layout.
        if layout_hdrh:
            fig.update_layout(layout_hdrh)

    return fig