1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
17 import plotly.graph_objects as go
23 from datetime import datetime
24 from numpy import isnan
26 from ..jumpavg import classify
# Fragment of the per-test color palette tuple (the tuple's opening
# line is elided from this excerpt); colors are cycled per selected test.
30 u"#1A1110", u"#DA2647", u"#214FC6", u"#01786F", u"#BD8260", u"#FFD12A",
31 u"#A6E7FF", u"#738276", u"#C95A49", u"#FC5A8D", u"#CEC8EF", u"#391285",
32 u"#6F2DA8", u"#FF878D", u"#45A27D", u"#FFD0B9", u"#FD5240", u"#DB91EF",
33 u"#44D7A8", u"#4F86F7", u"#84DE02", u"#FFCFF1", u"#614051"
# Colorbar tick labels for the anomaly classification markers.
# Throughput: low values are regressions; latency uses the inverted order
# (for latency a lower value is an improvement).
48 _TICK_TEXT_TPUT = [u"Regression", u"Normal", u"Progression"]
57 _TICK_TEXT_LAT = [u"Progression", u"Normal", u"Regression"]
# Test type -> data-frame column holding the measured value
# (the dict's opening line is elided from this excerpt).
59 "mrr": "result_receive_rate_rate_avg",
60 "ndr": "result_ndr_lower_rate_value",
61 "pdr": "result_pdr_lower_rate_value",
62 "pdr-lat": "result_latency_forward_pdr_50_avg"
# Test type -> data-frame column holding the unit of the measured value
# (the dict's opening line is elided from this excerpt).
65 "mrr": "result_receive_rate_rate_unit",
66 "ndr": "result_ndr_lower_rate_unit",
67 "pdr": "result_pdr_lower_rate_unit",
68 "pdr-lat": "result_latency_forward_pdr_50_unit"
# Columns with compressed HDR-histogram strings, one per load level and
# direction; _get_hdrh_latencies() iterates these in this exact order.
70 _LAT_HDRH = ( # Do not change the order
71 "result_latency_forward_pdr_0_hdrh",
72 "result_latency_reverse_pdr_0_hdrh",
73 "result_latency_forward_pdr_10_hdrh",
74 "result_latency_reverse_pdr_10_hdrh",
75 "result_latency_forward_pdr_50_hdrh",
76 "result_latency_reverse_pdr_50_hdrh",
77 "result_latency_forward_pdr_90_hdrh",
78 "result_latency_reverse_pdr_90_hdrh",
# Upper clamp for percentile values in graph_hdrh_latency(); keeps the
# final x-axis point finite (x = 100/(100-p) diverges at p == 100).
80 # This value depends on latency stream rate (9001 pps) and duration (5s).
81 # Keep it slightly higher to ensure rounding errors to not remove tick mark.
82 PERCENTILE_MAX = 99.999501
# Human-readable label for each hdrh column; used as trace/legend names
# and in hover text of the latency percentile graph.
84 _GRAPH_LAT_HDRH_DESC = {
85 u"result_latency_forward_pdr_0_hdrh": u"No-load.",
86 u"result_latency_reverse_pdr_0_hdrh": u"No-load.",
87 u"result_latency_forward_pdr_10_hdrh": u"Low-load, 10% PDR.",
88 u"result_latency_reverse_pdr_10_hdrh": u"Low-load, 10% PDR.",
89 u"result_latency_forward_pdr_50_hdrh": u"Mid-load, 50% PDR.",
90 u"result_latency_reverse_pdr_50_hdrh": u"Mid-load, 50% PDR.",
91 u"result_latency_forward_pdr_90_hdrh": u"High-load, 90% PDR.",
92 u"result_latency_reverse_pdr_90_hdrh": u"High-load, 90% PDR."
# Collect the raw HDR-histogram strings of one test run (one data-frame
# row) into a dict keyed by column name; "name" identifies the test.
# NOTE(review): docstring, error handling and the return statement are on
# lines elided from this excerpt.
96 def _get_hdrh_latencies(row: pd.Series, name: str) -> dict:
100 latencies = {"name": name}
# Copy every hdrh column listed in _LAT_HDRH from the row.
101 for key in _LAT_HDRH:
103 latencies[key] = row[key]
110 def _classify_anomalies(data):
111 """Process the data and return anomalies and trending values.
113 Gather data into groups with average as trend value.
114 Decorate values within groups to be normal,
115 the first value of changed average as a regression, or a progression.
117 :param data: Full data set with unavailable samples replaced by nan.
118 :type data: OrderedDict
119 :returns: Classification and trend values
120 :rtype: 3-tuple, list of strings, list of floats and list of floats
122 # NaN means something went wrong.
123 # Use 0.0 to cause that being reported as a severe regression.
124 bare_data = [0.0 if isnan(sample) else sample for sample in data.values()]
125 # TODO: Make BitCountingGroupList a subclass of list again?
# Let jumpavg partition the samples into groups of similar average.
126 group_list = classify(bare_data).group_list
# Reversed so .pop() (cheap removal from the tail) yields groups in
# chronological order.
127 group_list.reverse() # Just to use .pop() for FIFO.
128 classification = list()
# NOTE(review): initialisation of the avgs/stdevs accumulators and of the
# active_group/values_left tracking state is on lines elided from this
# excerpt; NaN samples are classified as "outlier" below.
135 for sample in data.values():
137 classification.append(u"outlier")
139 stdevs.append(sample)
# Current group exhausted (or first iteration): advance to the next group
# and cache its average/stdev as the trend values for its samples.
141 if values_left < 1 or active_group is None:
143 while values_left < 1: # Ignore empty groups (should not happen).
144 active_group = group_list.pop()
145 values_left = len(active_group.run_list)
146 avg = active_group.stats.avg
147 stdv = active_group.stats.stdev
# First sample of a new group carries the group's comment, which is the
# classification produced by jumpavg (e.g. regression/progression).
148 classification.append(active_group.comment)
# Samples inside a group are "normal".
153 classification.append(u"normal")
157 return classification, avgs, stdevs
# Filter the full trending data frame down to the rows matching one
# user-selected test item (dut, phy, frame size, core count, test, type).
# NOTE(review): docstring, parts of the filter expression and the return
# statement are on lines elided from this excerpt.
160 def select_trending_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
# "phy" encodes topology, architecture, NIC and driver, "-"-separated.
164 phy = itm["phy"].split("-")
166 topo, arch, nic, drv = phy
# Normalise the driver name: "_" -> "-" as used in test ids.
171 drv = drv.replace("_", "-")
# TRex tests carry no core count in the test name.
175 core = str() if itm["dut"] == "trex" else f"{itm['core']}"
# Both ndr and pdr results come from the same "ndrpdr" test run.
176 ttype = "ndrpdr" if itm["testtype"] in ("ndr", "pdr") else itm["testtype"]
177 dut = "none" if itm["dut"] == "trex" else itm["dut"].upper()
# Row filter: matching DUT type, test type, and only passed runs
# (the opening of this boolean filter expression is elided).
180 (data["dut_type"] == dut) &
181 (data["test_type"] == ttype) &
182 (data["passed"] == True)
# Keep only jobs run on the selected topology/architecture testbeds.
184 df = df[df.job.str.endswith(f"{topo}-{arch}")]
# Match the exact test by NIC, frame size, core count, driver and test
# name within the dotted test id, then order samples chronologically.
185 df = df[df.test_id.str.contains(
186 f"^.*[.|-]{nic}.*{itm['framesize']}-{core}-{drv}{itm['test']}-{ttype}$",
188 )].sort_values(by="start_time", ignore_index=True)
# Build the plotly traces (samples, trend line, anomaly markers) for one
# test and one test type within the selected time window.
# NOTE(review): docstring and several statements (list initialisations,
# go.Scatter keyword blocks, the final return) are on lines elided from
# this excerpt.
193 def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
194 start: datetime, end: datetime, color: str) -> list:
# Drop runs that have no result for the selected metric column.
198 df = df.dropna(subset=[_VALUE[ttype], ])
# Keep only samples inside the requested [start, end] window.
201 df = df.loc[((df["start_time"] >= start) & (df["start_time"] <= end))]
205 x_axis = df["start_time"].tolist()
# Classify each sample and compute per-sample trend average and stdev;
# keys are timestamps, values the measured metric.
207 anomalies, trend_avg, trend_stdev = _classify_anomalies(
208 {k: v for k, v in zip(x_axis, df[_VALUE[ttype]])}
# Per-sample hover texts (and, for latency, hdrh customdata).
213 for _, row in df.iterrows():
214 d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
216 f"date: {row['start_time'].strftime('%d-%m-%Y %H:%M:%S')}<br>"
217 f"<prop> [{row[_UNIT[ttype]]}]: {row[_VALUE[ttype]]}<br>"
219 f"{d_type}-ref: {row['dut_version']}<br>"
220 f"csit-ref: {row['job']}/{row['build']}<br>"
221 f"hosts: {', '.join(row['hosts'])}"
# Optional stdev line; references receive-rate columns, so presumably
# built only for mrr results — the guarding condition is elided here.
225 f"stdev [{row['result_receive_rate_rate_unit']}]: "
226 f"{row['result_receive_rate_rate_stdev']}<br>"
# Resolve the placeholders: "<prop>" names the displayed quantity,
# "<stdev>" carries the optional stdev line.
230 hover_itm = hover_itm.replace(
231 "<prop>", "latency" if ttype == "pdr-lat" else "average"
232 ).replace("<stdev>", stdev)
233 hover.append(hover_itm)
# Latency traces also carry the hdrh data for the click-through graph.
234 if ttype == "pdr-lat":
235 customdata.append(_get_hdrh_latencies(row, name))
# Hover texts for the trend line points.
238 for avg, stdev, (_, row) in zip(trend_avg, trend_stdev, df.iterrows()):
239 d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
241 f"date: {row['start_time'].strftime('%d-%m-%Y %H:%M:%S')}<br>"
242 f"trend [pps]: {avg}<br>"
243 f"stdev [pps]: {stdev}<br>"
244 f"{d_type}-ref: {row['dut_version']}<br>"
245 f"csit-ref: {row['job']}/{row['build']}<br>"
246 f"hosts: {', '.join(row['hosts'])}"
248 if ttype == "pdr-lat":
# Latency is shown in microseconds, not packets per second.
249 hover_itm = hover_itm.replace("[pps]", "[us]")
250 hover_trend.append(hover_itm)
# Two base traces: the raw samples and the trend line.
253 go.Scatter( # Samples
261 u"symbol": u"circle",
264 hoverinfo=u"text+name",
267 customdata=customdata
269 go.Scatter( # Trend line
280 hoverinfo=u"text+name",
# Collect anomaly markers (regressions/progressions) on the trend line.
289 anomaly_color = list()
291 for idx, anomaly in enumerate(anomalies):
292 if anomaly in (u"regression", u"progression"):
293 anomaly_x.append(x_axis[idx])
294 anomaly_y.append(trend_avg[idx])
295 anomaly_color.append(_ANOMALY_COLOR[anomaly])
297 f"date: {x_axis[idx].strftime('%d-%m-%Y %H:%M:%S')}<br>"
298 f"trend [pps]: {trend_avg[idx]}<br>"
299 f"classification: {anomaly}"
301 if ttype == "pdr-lat":
302 hover_itm = hover_itm.replace("[pps]", "[us]")
303 hover.append(hover_itm)
# NOTE(review): presumably pads the color values so the marker colorscale
# always spans all three classification bands — confirm against
# _ANOMALY_COLOR (elided from this excerpt).
304 anomaly_color.extend([0.0, 0.5, 1.0])
311 hoverinfo=u"text+name",
317 u"symbol": u"circle-open",
318 u"color": anomaly_color,
# Latency uses the inverted colorscale/tick order (see _TICK_TEXT_LAT).
319 u"colorscale": _COLORSCALE_LAT \
320 if ttype == "pdr-lat" else _COLORSCALE_TPUT,
328 u"title": u"Circles Marking Data Classification",
329 u"titleside": u"right",
330 u"tickmode": u"array",
# One tick centered in each of the three color bands.
331 u"tickvals": [0.167, 0.500, 0.833],
332 u"ticktext": _TICK_TEXT_LAT \
333 if ttype == "pdr-lat" else _TICK_TEXT_TPUT,
# Produce the throughput figure and, for pdr selections, the latency
# figure for all user-selected test items.
# NOTE(review): docstring, figure pre-initialisation and loop-control
# statements are on lines elided from this excerpt.
346 def graph_trending(data: pd.DataFrame, sel:dict, layout: dict,
347 start: datetime, end: datetime) -> tuple:
348 for idx, itm in enumerate(sel):
# Narrow the full data set to this selected test item.
358 df = select_trending_data(data, itm)
# Nothing measured for this selection: skip it.
359 if df is None or df.empty:
# Trace name shown in the legend and hover boxes.
362 name = "-".join((itm["dut"], itm["phy"], itm["framesize"], itm["core"],
363 itm["test"], itm["testtype"], ))
# Throughput/latency traces, colored per item (palette wraps around).
364 traces = _generate_trending_traces(
365 itm["testtype"], name, df, start, end, _COLORS[idx % len(_COLORS)]
369 fig_tput = go.Figure()
370 fig_tput.add_traces(traces)
# PDR tests additionally get a latency trending graph.
372 if itm["testtype"] == "pdr":
373 traces = _generate_trending_traces(
374 "pdr-lat", name, df, start, end, _COLORS[idx % len(_COLORS)]
378 fig_lat = go.Figure()
379 fig_lat.add_traces(traces)
# Apply the configured layouts (empty dict if not configured).
382 fig_tput.update_layout(layout.get("plot-trending-tput", dict()))
384 fig_lat.update_layout(layout.get("plot-trending-lat", dict()))
386 return fig_tput, fig_lat
# Build the latency-vs-percentile figure from decoded HDR histograms,
# one forward/reverse trace pair per load level.
# NOTE(review): docstring, list initialisations and trace construction
# details are on lines elided from this excerpt; the function presumably
# continues past the last visible line (return of the figure).
389 def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
396 for idx, (lat_name, lat_hdrh) in enumerate(data.items()):
# Decode the compressed histogram string; undecodable entries are
# handled by the except clause below.
398 decoded = hdrh.histogram.HdrHistogram.decode(lat_hdrh)
399 except (hdrh.codec.HdrLengthException, TypeError) as err:
406 for item in decoded.get_recorded_iterator():
407 # The real value is "percentile".
408 # For 100%, we cut that down to "x_perc" to avoid
410 percentile = item.percentile_level_iterated_to
# Clamp at PERCENTILE_MAX: x = 100/(100-p) diverges at p == 100.
411 x_perc = min(percentile, PERCENTILE_MAX)
412 xaxis.append(previous_x)
413 yaxis.append(item.value_iterated_to)
415 f"<b>{_GRAPH_LAT_HDRH_DESC[lat_name]}</b><br>"
416 f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
417 f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
418 f"Latency: {item.value_iterated_to}uSec"
# Percentile -> x coordinate: each "nine" of percentile adds a
# constant step on this axis.
420 next_x = 100.0 / (100.0 - x_perc)
422 yaxis.append(item.value_iterated_to)
424 f"<b>{_GRAPH_LAT_HDRH_DESC[lat_name]}</b><br>"
425 f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
426 f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
427 f"Latency: {item.value_iterated_to}uSec"
430 prev_perc = percentile
436 name=_GRAPH_LAT_HDRH_DESC[lat_name],
# Forward/reverse traces of one load level share a legend group.
438 legendgroup=_GRAPH_LAT_HDRH_DESC[lat_name],
# Show only one legend entry per forward/reverse pair.
439 showlegend=bool(idx % 2),
# Same color for both directions of a pair.
441 color=_COLORS[int(idx/2)],
# Thinner line distinguishes the second direction of a pair.
443 width=1 if idx % 2 else 2
451 fig.add_traces(traces)
452 layout_hdrh = layout.get("plot-hdrh-latency", None)
454 fig.update_layout(layout_hdrh)