X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fdash%2Fapp%2Fpal%2Ftrending%2Fgraphs.py;h=06bea25466cf4c123aa7b528dcc0145a7fee7e45;hb=refs%2Fchanges%2F94%2F36794%2F4;hp=52e86d8e8386183c3c93f6d19c97730c5400afe3;hpb=88853d7d4e4461198213b9db021fe4e4985c9533;p=csit.git

diff --git a/resources/tools/dash/app/pal/trending/graphs.py b/resources/tools/dash/app/pal/trending/graphs.py
index 52e86d8e83..06bea25466 100644
--- a/resources/tools/dash/app/pal/trending/graphs.py
+++ b/resources/tools/dash/app/pal/trending/graphs.py
@@ -21,84 +21,24 @@ import hdrh.histogram
 import hdrh.codec
 
 from datetime import datetime
-from numpy import isnan
-
-from ..jumpavg import classify
-
-
-_COLORS = (
-    u"#1A1110", u"#DA2647", u"#214FC6", u"#01786F", u"#BD8260", u"#FFD12A",
-    u"#A6E7FF", u"#738276", u"#C95A49", u"#FC5A8D", u"#CEC8EF", u"#391285",
-    u"#6F2DA8", u"#FF878D", u"#45A27D", u"#FFD0B9", u"#FD5240", u"#DB91EF",
-    u"#44D7A8", u"#4F86F7", u"#84DE02", u"#FFCFF1", u"#614051"
-)
-_ANOMALY_COLOR = {
-    u"regression": 0.0,
-    u"normal": 0.5,
-    u"progression": 1.0
-}
-_COLORSCALE_TPUT = [
-    [0.00, u"red"],
-    [0.33, u"red"],
-    [0.33, u"white"],
-    [0.66, u"white"],
-    [0.66, u"green"],
-    [1.00, u"green"]
-]
-_TICK_TEXT_TPUT = [u"Regression", u"Normal", u"Progression"]
-_COLORSCALE_LAT = [
-    [0.00, u"green"],
-    [0.33, u"green"],
-    [0.33, u"white"],
-    [0.66, u"white"],
-    [0.66, u"red"],
-    [1.00, u"red"]
-]
-_TICK_TEXT_LAT = [u"Progression", u"Normal", u"Regression"]
-_VALUE = {
-    "mrr": "result_receive_rate_rate_avg",
-    "ndr": "result_ndr_lower_rate_value",
-    "pdr": "result_pdr_lower_rate_value",
-    "pdr-lat": "result_latency_forward_pdr_50_avg"
-}
-_UNIT = {
-    "mrr": "result_receive_rate_rate_unit",
-    "ndr": "result_ndr_lower_rate_unit",
-    "pdr": "result_pdr_lower_rate_unit",
-    "pdr-lat": "result_latency_forward_pdr_50_unit"
-}
-_LAT_HDRH = (  # Do not change the order
-    "result_latency_forward_pdr_0_hdrh",
-    "result_latency_reverse_pdr_0_hdrh",
-    "result_latency_forward_pdr_10_hdrh",
-    "result_latency_reverse_pdr_10_hdrh",
-    "result_latency_forward_pdr_50_hdrh",
-    "result_latency_reverse_pdr_50_hdrh",
-    "result_latency_forward_pdr_90_hdrh",
-    "result_latency_reverse_pdr_90_hdrh",
-)
-# This value depends on latency stream rate (9001 pps) and duration (5s).
-# Keep it slightly higher to ensure rounding errors to not remove tick mark.
-PERCENTILE_MAX = 99.999501
-
-_GRAPH_LAT_HDRH_DESC = {
-    u"result_latency_forward_pdr_0_hdrh": u"No-load.",
-    u"result_latency_reverse_pdr_0_hdrh": u"No-load.",
-    u"result_latency_forward_pdr_10_hdrh": u"Low-load, 10% PDR.",
-    u"result_latency_reverse_pdr_10_hdrh": u"Low-load, 10% PDR.",
-    u"result_latency_forward_pdr_50_hdrh": u"Mid-load, 50% PDR.",
-    u"result_latency_reverse_pdr_50_hdrh": u"Mid-load, 50% PDR.",
-    u"result_latency_forward_pdr_90_hdrh": u"High-load, 90% PDR.",
-    u"result_latency_reverse_pdr_90_hdrh": u"High-load, 90% PDR."
-}
+
+from ..utils.constants import Constants as C
+from ..utils.utils import classify_anomalies, get_color
 
 
 def _get_hdrh_latencies(row: pd.Series, name: str) -> dict:
-    """
+    """Get the HDRH latencies from the test data.
+
+    :param row: A row from the data frame with test data.
+    :param name: The test name to be displayed as the graph title.
+    :type row: pandas.Series
+    :type name: str
+    :returns: Dictionary with HDRH latencies.
+    :rtype: dict
     """
 
     latencies = {"name": name}
-    for key in _LAT_HDRH:
+    for key in C.LAT_HDRH:
         try:
             latencies[key] = row[key]
         except KeyError:
@@ -107,58 +47,16 @@ def _get_hdrh_latencies(row: pd.Series, name: str) -> dict:
     return latencies
 
 
-def _classify_anomalies(data):
-    """Process the data and return anomalies and trending values.
-
-    Gather data into groups with average as trend value.
-    Decorate values within groups to be normal,
-    the first value of changed average as a regression, or a progression.
-
-    :param data: Full data set with unavailable samples replaced by nan.
-    :type data: OrderedDict
-    :returns: Classification and trend values
-    :rtype: 3-tuple, list of strings, list of floats and list of floats
-    """
-    # NaN means something went wrong.
-    # Use 0.0 to cause that being reported as a severe regression.
-    bare_data = [0.0 if isnan(sample) else sample for sample in data.values()]
-    # TODO: Make BitCountingGroupList a subclass of list again?
-    group_list = classify(bare_data).group_list
-    group_list.reverse()  # Just to use .pop() for FIFO.
-    classification = list()
-    avgs = list()
-    stdevs = list()
-    active_group = None
-    values_left = 0
-    avg = 0.0
-    stdv = 0.0
-    for sample in data.values():
-        if isnan(sample):
-            classification.append(u"outlier")
-            avgs.append(sample)
-            stdevs.append(sample)
-            continue
-        if values_left < 1 or active_group is None:
-            values_left = 0
-            while values_left < 1:  # Ignore empty groups (should not happen).
-                active_group = group_list.pop()
-                values_left = len(active_group.run_list)
-            avg = active_group.stats.avg
-            stdv = active_group.stats.stdev
-            classification.append(active_group.comment)
-            avgs.append(avg)
-            stdevs.append(stdv)
-            values_left -= 1
-            continue
-        classification.append(u"normal")
-        avgs.append(avg)
-        stdevs.append(stdv)
-        values_left -= 1
-    return classification, avgs, stdevs
-
-
 def select_trending_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
-    """
+    """Select the data for graphs from the provided data frame.
+
+    :param data: Data frame with data for graphs.
+    :param itm: Item (in this case job name) which data will be selected from
+        the input data frame.
+    :type data: pandas.DataFrame
+    :type itm: dict
+    :returns: A data frame with selected data.
+    :rtype: pandas.DataFrame
     """
 
     phy = itm["phy"].split("-")
@@ -174,10 +72,20 @@ def select_trending_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
 
     core = str() if itm["dut"] == "trex" else f"{itm['core']}"
     ttype = "ndrpdr" if itm["testtype"] in ("ndr", "pdr") else itm["testtype"]
-    dut = "none" if itm["dut"] == "trex" else itm["dut"].upper()
+    dut_v100 = "none" if itm["dut"] == "trex" else itm["dut"]
+    dut_v101 = itm["dut"]
 
     df = data.loc[(
-        (data["dut_type"] == dut) &
+        (
+            (
+                (data["version"] == "1.0.0") &
+                (data["dut_type"].str.lower() == dut_v100)
+            ) |
+            (
+                (data["version"] == "1.0.1") &
+                (data["dut_type"].str.lower() == dut_v101)
+            )
+        ) &
         (data["test_type"] == ttype) &
        (data["passed"] == True)
    )]
@@ -191,11 +99,29 @@ def select_trending_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
 
 
 def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
-    start: datetime, end: datetime, color: str) -> list:
-    """
+    start: datetime, end: datetime, color: str, norm_factor: float) -> list:
+    """Generate the trending traces for the trending graph.
+
+    :param ttype: Test type (MRR, NDR, PDR).
+    :param name: The test name to be displayed as the graph title.
+    :param df: Data frame with test data.
+    :param start: The date (and time) when the selected data starts.
+    :param end: The date (and time) when the selected data ends.
+    :param color: The color of the trace (samples and trend line).
+    :param norm_factor: The factor used for normalization of the results to
+        CPU frequency set to Constants.NORM_FREQUENCY.
+    :type ttype: str
+    :type name: str
+    :type df: pandas.DataFrame
+    :type start: datetime.datetime
+    :type end: datetime.datetime
+    :type color: str
+    :type norm_factor: float
+    :returns: Traces (samples, trending line, anomalies)
+    :rtype: list
     """
 
-    df = df.dropna(subset=[_VALUE[ttype], ])
+    df = df.dropna(subset=[C.VALUE[ttype], ])
     if df.empty:
         return list()
     df = df.loc[((df["start_time"] >= start) & (df["start_time"] <= end))]
@@ -203,18 +129,22 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
         return list()
 
     x_axis = df["start_time"].tolist()
+    if ttype == "pdr-lat":
+        y_data = [(itm / norm_factor) for itm in df[C.VALUE[ttype]].tolist()]
+    else:
+        y_data = [(itm * norm_factor) for itm in df[C.VALUE[ttype]].tolist()]
 
-    anomalies, trend_avg, trend_stdev = _classify_anomalies(
-        {k: v for k, v in zip(x_axis, df[_VALUE[ttype]])}
+    anomalies, trend_avg, trend_stdev = classify_anomalies(
+        {k: v for k, v in zip(x_axis, y_data)}
     )
 
     hover = list()
     customdata = list()
-    for _, row in df.iterrows():
+    for idx, (_, row) in enumerate(df.iterrows()):
         d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
         hover_itm = (
-            f"date: {row['start_time'].strftime('%d-%m-%Y %H:%M:%S')}<br>"
-            f"<prop> [{row[_UNIT[ttype]]}]: {row[_VALUE[ttype]]}<br>"
+            f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+            f"<prop> [{row[C.UNIT[ttype]]}]: {y_data[idx]:,.0f}<br>"
             f"<stdev>"
             f"{d_type}-ref: {row['dut_version']}<br>"
             f"csit-ref: {row['job']}/{row['build']}<br>"
@@ -223,7 +153,7 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
         if ttype == "mrr":
             stdev = (
                 f"stdev [{row['result_receive_rate_rate_unit']}]: "
-                f"{row['result_receive_rate_rate_stdev']}<br>"
+                f"{row['result_receive_rate_rate_stdev']:,.0f}<br>"
             )
         else:
             stdev = ""
@@ -238,9 +168,9 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
     for avg, stdev, (_, row) in zip(trend_avg, trend_stdev, df.iterrows()):
         d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
         hover_itm = (
-            f"date: {row['start_time'].strftime('%d-%m-%Y %H:%M:%S')}<br>"
-            f"trend [pps]: {avg}<br>"
-            f"stdev [pps]: {stdev}<br>"
+            f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+            f"trend [pps]: {avg:,.0f}<br>"
+            f"stdev [pps]: {stdev:,.0f}<br>"
             f"{d_type}-ref: {row['dut_version']}<br>"
             f"csit-ref: {row['job']}/{row['build']}<br>"
             f"hosts: {', '.join(row['hosts'])}"
@@ -252,16 +182,16 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
     traces = [
         go.Scatter(  # Samples
             x=x_axis,
-            y=df[_VALUE[ttype]],
+            y=y_data,
             name=name,
             mode="markers",
             marker={
-                u"size": 5,
-                u"color": color,
-                u"symbol": u"circle",
+                "size": 5,
+                "color": color,
+                "symbol": "circle",
             },
             text=hover,
-            hoverinfo=u"text+name",
+            hoverinfo="text+name",
             showlegend=True,
             legendgroup=name,
             customdata=customdata
@@ -272,12 +202,12 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
             name=name,
             mode="lines",
             line={
-                u"shape": u"linear",
-                u"width": 1,
-                u"color": color,
+                "shape": "linear",
+                "width": 1,
+                "color": color,
             },
             text=hover_trend,
-            hoverinfo=u"text+name",
+            hoverinfo="text+name",
             showlegend=False,
             legendgroup=name,
         )
@@ -289,13 +219,13 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
     anomaly_color = list()
     hover = list()
     for idx, anomaly in enumerate(anomalies):
-        if anomaly in (u"regression", u"progression"):
+        if anomaly in ("regression", "progression"):
             anomaly_x.append(x_axis[idx])
             anomaly_y.append(trend_avg[idx])
-            anomaly_color.append(_ANOMALY_COLOR[anomaly])
+            anomaly_color.append(C.ANOMALY_COLOR[anomaly])
             hover_itm = (
-                f"date: {x_axis[idx].strftime('%d-%m-%Y %H:%M:%S')}<br>"
-                f"trend [pps]: {trend_avg[idx]}<br>"
+                f"date: {x_axis[idx].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+                f"trend [pps]: {trend_avg[idx]:,.0f}<br>"
                 f"classification: {anomaly}"
             )
             if ttype == "pdr-lat":
@@ -306,35 +236,35 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
             go.Scatter(
                 x=anomaly_x,
                 y=anomaly_y,
-                mode=u"markers",
+                mode="markers",
                 text=hover,
-                hoverinfo=u"text+name",
+                hoverinfo="text+name",
                 showlegend=False,
                 legendgroup=name,
                 name=name,
                 marker={
-                    u"size": 15,
-                    u"symbol": u"circle-open",
-                    u"color": anomaly_color,
-                    u"colorscale": _COLORSCALE_LAT \
-                        if ttype == "pdr-lat" else _COLORSCALE_TPUT,
-                    u"showscale": True,
-                    u"line": {
-                        u"width": 2
+                    "size": 15,
+                    "symbol": "circle-open",
+                    "color": anomaly_color,
+                    "colorscale": C.COLORSCALE_LAT \
+                        if ttype == "pdr-lat" else C.COLORSCALE_TPUT,
+                    "showscale": True,
+                    "line": {
+                        "width": 2
                     },
-                    u"colorbar": {
-                        u"y": 0.5,
-                        u"len": 0.8,
-                        u"title": u"Circles Marking Data Classification",
-                        u"titleside": u"right",
-                        u"tickmode": u"array",
-                        u"tickvals": [0.167, 0.500, 0.833],
-                        u"ticktext": _TICK_TEXT_LAT \
-                            if ttype == "pdr-lat" else _TICK_TEXT_TPUT,
-                        u"ticks": u"",
-                        u"ticklen": 0,
-                        u"tickangle": -90,
-                        u"thickness": 10
+                    "colorbar": {
+                        "y": 0.5,
+                        "len": 0.8,
+                        "title": "Circles Marking Data Classification",
+                        "titleside": "right",
+                        "tickmode": "array",
+                        "tickvals": [0.167, 0.500, 0.833],
+                        "ticktext": C.TICK_TEXT_LAT \
+                            if ttype == "pdr-lat" else C.TICK_TEXT_TPUT,
+                        "ticks": "",
+                        "ticklen": 0,
+                        "tickangle": -90,
+                        "thickness": 10
                     }
                 }
             )
@@ -344,8 +274,25 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
 
 
 def graph_trending(data: pd.DataFrame, sel:dict, layout: dict,
-    start: datetime, end: datetime) -> tuple:
-    """
+    start: datetime, end: datetime, normalize: bool) -> tuple:
+    """Generate the trending graph(s) - MRR, NDR, PDR and for PDR also
+    Latencies (result_latency_forward_pdr_50_avg).
+
+    :param data: Data frame with test results.
+    :param sel: Selected tests.
+    :param layout: Layout of plot.ly graph.
+    :param start: The date (and time) when the selected data starts.
+    :param end: The date (and time) when the selected data ends.
+    :param normalize: If True, the data is normalized to CPU frequency
+        Constants.NORM_FREQUENCY.
+    :type data: pandas.DataFrame
+    :type sel: dict
+    :type layout: dict
+    :type start: datetime.datetime
+    :type end: datetime.datetime
+    :type normalize: bool
+    :returns: Trending graph(s)
+    :rtype: tuple(plotly.graph_objects.Figure, plotly.graph_objects.Figure)
     """
 
     if not sel:
@@ -361,8 +308,15 @@ def graph_trending(data: pd.DataFrame, sel:dict, layout: dict,
 
         name = "-".join((itm["dut"], itm["phy"], itm["framesize"], itm["core"],
             itm["test"], itm["testtype"], ))
+        if normalize:
+            phy = itm["phy"].split("-")
+            topo_arch = f"{phy[0]}-{phy[1]}" if len(phy) == 4 else str()
+            norm_factor = (C.NORM_FREQUENCY / C.FREQUENCY[topo_arch]) \
+                if topo_arch else 1.0
+        else:
+            norm_factor = 1.0
         traces = _generate_trending_traces(
-            itm["testtype"], name, df, start, end, _COLORS[idx % len(_COLORS)]
+            itm["testtype"], name, df, start, end, get_color(idx), norm_factor
         )
         if traces:
             if not fig_tput:
@@ -371,7 +325,7 @@ def graph_trending(data: pd.DataFrame, sel:dict, layout: dict,
 
         if itm["testtype"] == "pdr":
             traces = _generate_trending_traces(
-                "pdr-lat", name, df, start, end, _COLORS[idx % len(_COLORS)]
+                "pdr-lat", name, df, start, end, get_color(idx), norm_factor
             )
             if traces:
                 if not fig_lat:
@@ -387,7 +341,14 @@ def graph_trending(data: pd.DataFrame, sel:dict, layout: dict,
 
 
 def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
-    """
+    """Generate HDR Latency histogram graphs.
+
+    :param data: HDRH data.
+    :param layout: Layout of plot.ly graph.
+    :type data: dict
+    :type layout: dict
+    :returns: HDR latency Histogram.
+    :rtype: plotly.graph_objects.Figure
     """
 
     fig = None
@@ -408,12 +369,12 @@ def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
             # For 100%, we cut that down to "x_perc" to avoid
             # infinity.
             percentile = item.percentile_level_iterated_to
-            x_perc = min(percentile, PERCENTILE_MAX)
+            x_perc = min(percentile, C.PERCENTILE_MAX)
             xaxis.append(previous_x)
             yaxis.append(item.value_iterated_to)
             hovertext.append(
-                f"{_GRAPH_LAT_HDRH_DESC[lat_name]}<br>"
-                f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
+                f"{C.GRAPH_LAT_HDRH_DESC[lat_name]}<br>"
+                f"Direction: {('W-E', 'E-W')[idx % 2]}<br>"
                 f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
                 f"Latency: {item.value_iterated_to}uSec"
             )
@@ -421,8 +382,8 @@ def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
             xaxis.append(next_x)
             yaxis.append(item.value_iterated_to)
             hovertext.append(
-                f"{_GRAPH_LAT_HDRH_DESC[lat_name]}<br>"
-                f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
+                f"{C.GRAPH_LAT_HDRH_DESC[lat_name]}<br>"
+                f"Direction: {('W-E', 'E-W')[idx % 2]}<br>"
                 f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
                 f"Latency: {item.value_iterated_to}uSec"
             )
@@ -433,17 +394,17 @@ def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
             go.Scatter(
                 x=xaxis,
                 y=yaxis,
-                name=_GRAPH_LAT_HDRH_DESC[lat_name],
-                mode=u"lines",
-                legendgroup=_GRAPH_LAT_HDRH_DESC[lat_name],
+                name=C.GRAPH_LAT_HDRH_DESC[lat_name],
+                mode="lines",
+                legendgroup=C.GRAPH_LAT_HDRH_DESC[lat_name],
                 showlegend=bool(idx % 2),
                 line=dict(
-                    color=_COLORS[int(idx/2)],
-                    dash=u"solid",
+                    color=get_color(int(idx/2)),
+                    dash="solid",
                     width=1 if idx % 2 else 2
                 ),
                 hovertext=hovertext,
-                hoverinfo=u"text"
+                hoverinfo="text"
             )
         )
    if traces: