diff --git a/resources/tools/dash/app/pal/report/graphs.py b/resources/tools/dash/app/pal/report/graphs.py
index 634e539f7e..36f28d09e8 100644
--- a/resources/tools/dash/app/pal/report/graphs.py
+++ b/resources/tools/dash/app/pal/report/graphs.py
@@ -20,62 +20,19 @@ import pandas as pd
 
 from copy import deepcopy
 
-import hdrh.histogram
-import hdrh.codec
-
-
-_VALUE = {
-    "mrr": "result_receive_rate_rate_values",
-    "ndr": "result_ndr_lower_rate_value",
-    "pdr": "result_pdr_lower_rate_value",
-    "pdr-lat": "result_latency_forward_pdr_50_avg"
-}
-_UNIT = {
-    "mrr": "result_receive_rate_rate_unit",
-    "ndr": "result_ndr_lower_rate_unit",
-    "pdr": "result_pdr_lower_rate_unit",
-    "pdr-lat": "result_latency_forward_pdr_50_unit"
-}
-_LAT_HDRH = (  # Do not change the order
-    "result_latency_forward_pdr_0_hdrh",
-    "result_latency_reverse_pdr_0_hdrh",
-    "result_latency_forward_pdr_10_hdrh",
-    "result_latency_reverse_pdr_10_hdrh",
-    "result_latency_forward_pdr_50_hdrh",
-    "result_latency_reverse_pdr_50_hdrh",
-    "result_latency_forward_pdr_90_hdrh",
-    "result_latency_reverse_pdr_90_hdrh",
-)
-# This value depends on latency stream rate (9001 pps) and duration (5s).
-# Keep it slightly higher to ensure rounding errors to not remove tick mark.
-PERCENTILE_MAX = 99.999501
-
-_GRAPH_LAT_HDRH_DESC = {
-    u"result_latency_forward_pdr_0_hdrh": u"No-load.",
-    u"result_latency_reverse_pdr_0_hdrh": u"No-load.",
-    u"result_latency_forward_pdr_10_hdrh": u"Low-load, 10% PDR.",
-    u"result_latency_reverse_pdr_10_hdrh": u"Low-load, 10% PDR.",
-    u"result_latency_forward_pdr_50_hdrh": u"Mid-load, 50% PDR.",
-    u"result_latency_reverse_pdr_50_hdrh": u"Mid-load, 50% PDR.",
-    u"result_latency_forward_pdr_90_hdrh": u"High-load, 90% PDR.",
-    u"result_latency_reverse_pdr_90_hdrh": u"High-load, 90% PDR."
-}
-
-
-def _get_color(idx: int) -> str:
-    """
-    """
-    _COLORS = (
-        "#1A1110", "#DA2647", "#214FC6", "#01786F", "#BD8260", "#FFD12A",
-        "#A6E7FF", "#738276", "#C95A49", "#FC5A8D", "#CEC8EF", "#391285",
-        "#6F2DA8", "#FF878D", "#45A27D", "#FFD0B9", "#FD5240", "#DB91EF",
-        "#44D7A8", "#4F86F7", "#84DE02", "#FFCFF1", "#614051"
-    )
-    return _COLORS[idx % len(_COLORS)]
+from ..utils.constants import Constants as C
+from ..utils.utils import get_color
 
 
 def get_short_version(version: str, dut_type: str="vpp") -> str:
-    """
+    """Returns the short version of DUT without the build number.
+
+    :param version: Original version string.
+    :param dut_type: DUT type.
+    :type version: str
+    :type dut_type: str
+    :returns: Short version string.
+    :rtype: str
     """
 
     if dut_type in ("trex", "dpdk"):
@@ -98,7 +55,15 @@ def get_short_version(version: str, dut_type: str="vpp") -> str:
 
 
 def select_iterative_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
-    """
+    """Select the data for graphs and tables from the provided data frame.
+
+    :param data: Data frame with data for graphs and tables.
+    :param itm: Item (a dict describing the selected test) whose data will be
+        selected from the input data frame.
+    :type data: pandas.DataFrame
+    :type itm: dict
+    :returns: A data frame with selected data.
+    :rtype: pandas.DataFrame
     """
 
    phy = itm["phy"].split("-")
@@ -144,8 +109,22 @@ def select_iterative_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
 
     return df
 
 
-def graph_iterative(data: pd.DataFrame, sel:dict, layout: dict) -> tuple:
-    """
+def graph_iterative(data: pd.DataFrame, sel:dict, layout: dict,
+        normalize: bool) -> tuple:
+    """Generate the statistical box graph with iterative data (MRR, NDR and PDR;
+    for PDR also latencies).
+
+    :param data: Data frame with iterative data.
+    :param sel: Selected tests.
+    :param layout: Layout of plot.ly graph.
+    :param normalize: If True, the data is normalized to the CPU frequency
+        Constants.NORM_FREQUENCY.
+    :type data: pandas.DataFrame
+    :type sel: dict
+    :type layout: dict
+    :type normalize: bool
+    :returns: Tuple of graphs - throughput and latency.
+    :rtype: tuple(plotly.graph_objects.Figure, plotly.graph_objects.Figure)
     """
 
     fig_tput = None
@@ -162,13 +141,19 @@ def graph_iterative(data: pd.DataFrame, sel:dict, layout: dict) -> tuple:
         itm_data = select_iterative_data(data, itm)
         if itm_data.empty:
             continue
+        phy = itm["phy"].split("-")
+        topo_arch = f"{phy[0]}-{phy[1]}" if len(phy) == 4 else str()
+        norm_factor = (C.NORM_FREQUENCY / C.FREQUENCY[topo_arch]) \
+            if normalize else 1.0
         if itm["testtype"] == "mrr":
-            y_data = itm_data[_VALUE[itm["testtype"]]].to_list()[0]
-            if y_data.size > 0:
+            y_data_raw = itm_data[C.VALUE_ITER[itm["testtype"]]].to_list()[0]
+            y_data = [(y * norm_factor) for y in y_data_raw]
+            if len(y_data) > 0:
                 y_tput_max = \
                     max(y_data) if max(y_data) > y_tput_max else y_tput_max
         else:
-            y_data = itm_data[_VALUE[itm["testtype"]]].to_list()
+            y_data_raw = itm_data[C.VALUE_ITER[itm["testtype"]]].to_list()
+            y_data = [(y * norm_factor) for y in y_data_raw]
             if y_data:
                 y_tput_max = \
                     max(y_data) if max(y_data) > y_tput_max else y_tput_max
@@ -178,19 +163,20 @@ def graph_iterative(data: pd.DataFrame, sel:dict, layout: dict) -> tuple:
             name=(
                 f"{idx + 1}. "
                 f"({nr_of_samples:02d} "
-                f"run{u's' if nr_of_samples > 1 else u''}) "
+                f"run{'s' if nr_of_samples > 1 else ''}) "
                 f"{itm['id']}"
             ),
             hoverinfo=u"y+name",
             boxpoints="all",
             jitter=0.3,
-            marker=dict(color=_get_color(idx))
+            marker=dict(color=get_color(idx))
         )
         tput_traces.append(go.Box(**tput_kwargs))
         show_tput = True
 
     if itm["testtype"] == "pdr":
-        y_lat = itm_data[_VALUE["pdr-lat"]].to_list()
+        y_lat_row = itm_data[C.VALUE_ITER["pdr-lat"]].to_list()
+        y_lat = [(y / norm_factor) for y in y_lat_row]
         if y_lat:
             y_lat_max = max(y_lat) if max(y_lat) > y_lat_max else y_lat_max
             nr_of_samples = len(y_lat)
@@ -202,10 +188,10 @@ def graph_iterative(data: pd.DataFrame, sel:dict, layout: dict) -> tuple:
                 f"run{u's' if nr_of_samples > 1 else u''}) "
                 f"{itm['id']}"
             ),
-            hoverinfo=u"y+name",
+            hoverinfo="all",
             boxpoints="all",
             jitter=0.3,
-            marker=dict(color=_get_color(idx))
+            marker=dict(color=get_color(idx))
         )
         x_lat.append(idx + 1)
         lat_traces.append(go.Box(**lat_kwargs))
@@ -215,134 +201,75 @@ def graph_iterative(data: pd.DataFrame, sel:dict, layout: dict) -> tuple:
 
     if show_tput:
         pl_tput = deepcopy(layout["plot-throughput"])
-        pl_tput[u"xaxis"][u"tickvals"] = [i for i in range(len(sel))]
-        pl_tput[u"xaxis"][u"ticktext"] = [str(i + 1) for i in range(len(sel))]
+        pl_tput["xaxis"]["tickvals"] = [i for i in range(len(sel))]
+        pl_tput["xaxis"]["ticktext"] = [str(i + 1) for i in range(len(sel))]
         if y_tput_max:
-            pl_tput[u"yaxis"][u"range"] = [0, (int(y_tput_max / 1e6) + 1) * 1e6]
+            pl_tput["yaxis"]["range"] = [0, (int(y_tput_max / 1e6) + 1) * 1e6]
         fig_tput = go.Figure(data=tput_traces, layout=pl_tput)
     if show_latency:
         pl_lat = deepcopy(layout["plot-latency"])
-        pl_lat[u"xaxis"][u"tickvals"] = [i for i in range(len(x_lat))]
-        pl_lat[u"xaxis"][u"ticktext"] = x_lat
+        pl_lat["xaxis"]["tickvals"] = [i for i in range(len(x_lat))]
+        pl_lat["xaxis"]["ticktext"] = x_lat
         if y_lat_max:
-            pl_lat[u"yaxis"][u"range"] = [0, (int(y_lat_max / 10) + 1) * 10]
+            pl_lat["yaxis"]["range"] = [0, (int(y_lat_max / 10) + 1) * 10]
         fig_lat = go.Figure(data=lat_traces, layout=pl_lat)
 
     return fig_tput, fig_lat
 
 
-def table_comparison(data: pd.DataFrame, sel:dict) -> pd.DataFrame:
-    """
+def table_comparison(data: pd.DataFrame, sel:dict,
+        normalize: bool) -> pd.DataFrame:
+    """Generate the comparison table with selected tests.
+
+    :param data: Data frame with iterative data.
+    :param sel: Selected tests.
+    :param normalize: If True, the data is normalized to the CPU frequency
+        Constants.NORM_FREQUENCY.
+    :type data: pandas.DataFrame
+    :type sel: dict
+    :type normalize: bool
+    :returns: Comparison table.
+    :rtype: pandas.DataFrame
     """
 
     table = pd.DataFrame(
-        {
-            "Test Case": [
-                "64b-2t1c-avf-eth-l2xcbase-eth-2memif-1dcr",
-                "64b-2t1c-avf-eth-l2xcbase-eth-2vhostvr1024-1vm-vppl2xc",
-                "64b-2t1c-avf-ethip4udp-ip4base-iacl50sl-10kflows",
-                "78b-2t1c-avf-ethip6-ip6scale2m-rnd "],
-            "2106.0-8": [
-                "14.45 +- 0.08",
-                "9.63 +- 0.05",
-                "9.7 +- 0.02",
-                "8.95 +- 0.06"],
-            "2110.0-8": [
-                "14.45 +- 0.08",
-                "9.63 +- 0.05",
-                "9.7 +- 0.02",
-                "8.95 +- 0.06"],
-            "2110.0-9": [
-                "14.45 +- 0.08",
-                "9.63 +- 0.05",
-                "9.7 +- 0.02",
-                "8.95 +- 0.06"],
-            "2202.0-9": [
-                "14.45 +- 0.08",
-                "9.63 +- 0.05",
-                "9.7 +- 0.02",
-                "8.95 +- 0.06"],
-            "2110.0-9 vs 2110.0-8": [
-                "-0.23 +- 0.62",
-                "-1.37 +- 1.3",
-                "+0.08 +- 0.2",
-                "-2.16 +- 0.83"],
-            "2202.0-9 vs 2110.0-9": [
-                "+6.95 +- 0.72",
-                "+5.35 +- 1.26",
-                "+4.48 +- 1.48",
-                "+4.09 +- 0.95"]
-        }
+        # {
+        #     "Test Case": [
+        #         "64b-2t1c-avf-eth-l2xcbase-eth-2memif-1dcr",
+        #         "64b-2t1c-avf-eth-l2xcbase-eth-2vhostvr1024-1vm-vppl2xc",
+        #         "64b-2t1c-avf-ethip4udp-ip4base-iacl50sl-10kflows",
+        #         "78b-2t1c-avf-ethip6-ip6scale2m-rnd "],
+        #     "2106.0-8": [
+        #         "14.45 +- 0.08",
+        #         "9.63 +- 0.05",
+        #         "9.7 +- 0.02",
+        #         "8.95 +- 0.06"],
+        #     "2110.0-8": [
+        #         "14.45 +- 0.08",
+        #         "9.63 +- 0.05",
+        #         "9.7 +- 0.02",
+        #         "8.95 +- 0.06"],
+        #     "2110.0-9": [
+        #         "14.45 +- 0.08",
+        #         "9.63 +- 0.05",
+        #         "9.7 +- 0.02",
+        #         "8.95 +- 0.06"],
+        #     "2202.0-9": [
+        #         "14.45 +- 0.08",
+        #         "9.63 +- 0.05",
+        #         "9.7 +- 0.02",
+        #         "8.95 +- 0.06"],
+        #     "2110.0-9 vs 2110.0-8": [
+        #         "-0.23 +- 0.62",
+        #         "-1.37 +- 1.3",
+        #         "+0.08 +- 0.2",
+        #         "-2.16 +- 0.83"],
+        #     "2202.0-9 vs 2110.0-9": [
+        #         "+6.95 +- 0.72",
+        #         "+5.35 +- 1.26",
+        #         "+4.48 +- 1.48",
+        #         "+4.09 +- 0.95"]
+        # }
     )
-    return pd.DataFrame() #table
-
-
-def graph_hdrh_latency(data: dict, layout: dict) -> go.Figure:
-    """
-    """
-
-    fig = None
-
-    traces = list()
-    for idx, (lat_name, lat_hdrh) in enumerate(data.items()):
-        try:
-            decoded = hdrh.histogram.HdrHistogram.decode(lat_hdrh)
-        except (hdrh.codec.HdrLengthException, TypeError) as err:
-            continue
-        previous_x = 0.0
-        prev_perc = 0.0
-        xaxis = list()
-        yaxis = list()
-        hovertext = list()
-        for item in decoded.get_recorded_iterator():
-            # The real value is "percentile".
-            # For 100%, we cut that down to "x_perc" to avoid
-            # infinity.
-            percentile = item.percentile_level_iterated_to
-            x_perc = min(percentile, PERCENTILE_MAX)
-            xaxis.append(previous_x)
-            yaxis.append(item.value_iterated_to)
-            hovertext.append(
-                f"{_GRAPH_LAT_HDRH_DESC[lat_name]}<br>"
-                f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
-                f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
-                f"Latency: {item.value_iterated_to}uSec"
-            )
-            next_x = 100.0 / (100.0 - x_perc)
-            xaxis.append(next_x)
-            yaxis.append(item.value_iterated_to)
-            hovertext.append(
-                f"{_GRAPH_LAT_HDRH_DESC[lat_name]}<br>"
-                f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
-                f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
-                f"Latency: {item.value_iterated_to}uSec"
-            )
-            previous_x = next_x
-            prev_perc = percentile
-
-        traces.append(
-            go.Scatter(
-                x=xaxis,
-                y=yaxis,
-                name=_GRAPH_LAT_HDRH_DESC[lat_name],
-                mode=u"lines",
-                legendgroup=_GRAPH_LAT_HDRH_DESC[lat_name],
-                showlegend=bool(idx % 2),
-                line=dict(
-                    color=_get_color(int(idx/2)),
-                    dash=u"solid",
-                    width=1 if idx % 2 else 2
-                ),
-                hovertext=hovertext,
-                hoverinfo=u"text"
-            )
-        )
-    if traces:
-        fig = go.Figure()
-        fig.add_traces(traces)
-        layout_hdrh = layout.get("plot-hdrh-latency", None)
-        if lat_hdrh:
-            fig.update_layout(layout_hdrh)
-
-    return fig
+    return table