X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=csit.infra.dash%2Fapp%2Fcdash%2Fcoverage%2Ftables.py;h=372a8206bfc0226c2d6868513a9d2ecad2d518c9;hb=2ff03f4555ad917f74259f971d6d25c7cfd1861c;hp=a773a2280c76bd60cb2ea91834c7c6c3910bfe84;hpb=273821dc854ba53015e022600574655160ce1a40;p=csit.git diff --git a/csit.infra.dash/app/cdash/coverage/tables.py b/csit.infra.dash/app/cdash/coverage/tables.py index a773a2280c..372a8206bf 100644 --- a/csit.infra.dash/app/cdash/coverage/tables.py +++ b/csit.infra.dash/app/cdash/coverage/tables.py @@ -14,6 +14,7 @@ """The coverage data tables. """ + import hdrh.histogram import hdrh.codec import pandas as pd @@ -28,7 +29,8 @@ from ..utils.constants import Constants as C def select_coverage_data( data: pd.DataFrame, selected: dict, - csv: bool=False + csv: bool=False, + show_latency: bool=True ) -> list: """Select coverage data for the tables and generate tables as pandas data frames. @@ -37,9 +39,11 @@ def select_coverage_data( :param selected: Dictionary with user selection. :param csv: If True, pandas data frame with selected coverage data is returned for "Download Data" feature. + :param show_latency: If True, latency is displayed in the tables. :type data: pandas.DataFrame :type selected: dict :type csv: bool + :type show_latency: bool :returns: List of tuples with suite name (str) and data (pandas dataframe) or pandas dataframe if csv is True. :rtype: list[tuple[str, pandas.DataFrame], ] or pandas.DataFrame @@ -51,7 +55,7 @@ def select_coverage_data( phy = selected["phy"].split("-") if len(phy) == 4: topo, arch, nic, drv = phy - drv = "" if drv == "dpdk" else drv.replace("_", "-") + drv_str = "" if drv == "dpdk" else drv.replace("_", "-") else: return l_data @@ -64,7 +68,7 @@ def select_coverage_data( df = df[ (df.job.str.endswith(f"{topo}-{arch}")) & (df.test_id.str.contains( - f"^.*\.{selected['area']}\..*{nic}.*{drv}.*$", + f"^.*\.{selected['area']}\..*{nic}.*{drv_str}.*$", regex=True )) ] @@ -75,6 +79,8 @@ def select_coverage_data( inplace=True ) + ttype = df["test_type"].to_list()[0] + # Prepare the coverage data def _latency(hdrh_string: str, percentile: float) -> int: """Get latency from HDRH string for given percentile. 
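Note between hunks: the body of the _latency() helper lies outside the context shown above; only its signature and the first docstring line are visible. As rough orientation only, a helper with this signature can be written with the hdrhistogram package that tables.py already imports (hdrh.histogram, hdrh.codec). The exception handling and the None fallback below are assumptions for illustration, not lines taken from this patch.

    # Hedged sketch, not part of the patch: decode an HDRH-encoded latency
    # histogram string and return the value at the requested percentile.
    import hdrh.codec
    import hdrh.histogram

    def _latency(hdrh_string: str, percentile: float):
        try:
            decoded = hdrh.histogram.HdrHistogram.decode(hdrh_string)
            return decoded.get_value_at_percentile(percentile)
        except (hdrh.codec.HdrLengthException, TypeError):
            # Assumed fallback when the stored string cannot be decoded.
            return None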
@@ -105,153 +111,170 @@ def select_coverage_data(
         return test_id.split(".")[-1].replace("-ndrpdr", "")
 
     cov = pd.DataFrame()
-    cov["suite"] = df.apply(lambda row: _get_suite(row["test_id"]), axis=1)
+    cov["Suite"] = df.apply(lambda row: _get_suite(row["test_id"]), axis=1)
     cov["Test Name"] = df.apply(lambda row: _get_test(row["test_id"]), axis=1)
-    cov["Throughput_Unit"] = df["result_pdr_lower_rate_unit"]
-    cov["Throughput_NDR"] = df.apply(
-        lambda row: row["result_ndr_lower_rate_value"] / 1e6, axis=1
-    )
-    cov["Throughput_NDR_Mbps"] = df.apply(
-        lambda row: row["result_ndr_lower_bandwidth_value"] /1e9, axis=1
-    )
-    cov["Throughput_PDR"] = \
-        df.apply(lambda row: row["result_pdr_lower_rate_value"] / 1e6, axis=1)
-    cov["Throughput_PDR_Mbps"] = df.apply(
-        lambda row: row["result_pdr_lower_bandwidth_value"] /1e9, axis=1
-    )
-    cov["Latency Forward [us]_10% PDR_P50"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_10_hdrh"], 50.0),
-        axis=1
-    )
-    cov["Latency Forward [us]_10% PDR_P90"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_10_hdrh"], 90.0),
-        axis=1
-    )
-    cov["Latency Forward [us]_10% PDR_P99"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_10_hdrh"], 99.0),
-        axis=1
-    )
-    cov["Latency Forward [us]_50% PDR_P50"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_50_hdrh"], 50.0),
-        axis=1
-    )
-    cov["Latency Forward [us]_50% PDR_P90"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_50_hdrh"], 90.0),
-        axis=1
-    )
-    cov["Latency Forward [us]_50% PDR_P99"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_50_hdrh"], 99.0),
-        axis=1
-    )
-    cov["Latency Forward [us]_90% PDR_P50"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_90_hdrh"], 50.0),
-        axis=1
-    )
-    cov["Latency Forward [us]_90% PDR_P90"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_90_hdrh"], 90.0),
-        axis=1
-    )
-    cov["Latency Forward [us]_90% PDR_P99"] = df.apply(
-        lambda row: _latency(row["result_latency_forward_pdr_90_hdrh"], 99.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_10% PDR_P50"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_10_hdrh"], 50.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_10% PDR_P90"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_10_hdrh"], 90.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_10% PDR_P99"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_10_hdrh"], 99.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_50% PDR_P50"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_50_hdrh"], 50.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_50% PDR_P90"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_50_hdrh"], 90.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_50% PDR_P99"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_50_hdrh"], 99.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_90% PDR_P50"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_90_hdrh"], 50.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_90% PDR_P90"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_90_hdrh"], 90.0),
-        axis=1
-    )
-    cov["Latency Reverse [us]_90% PDR_P99"] = df.apply(
-        lambda row: _latency(row["result_latency_reverse_pdr_90_hdrh"], 99.0),
-        axis=1
-    )
+
+    if ttype == "device":
+        cov = cov.assign(Result="PASS")
+    elif ttype == "mrr":
+        cov["Throughput_Unit"] = df["result_receive_rate_rate_unit"]
+        cov["Throughput_AVG"] = df.apply(
+            lambda row: row["result_receive_rate_rate_avg"] / 1e9, axis=1
+        )
+        cov["Throughput_STDEV"] = df.apply(
+            lambda row: row["result_receive_rate_rate_stdev"] / 1e9, axis=1
+        )
+    else: # NDRPDR
+        cov["Throughput_Unit"] = df["result_pdr_lower_rate_unit"]
+        cov["Throughput_NDR"] = df.apply(
+            lambda row: row["result_ndr_lower_rate_value"] / 1e6, axis=1
+        )
+        cov["Throughput_NDR_Gbps"] = df.apply(
+            lambda row: row["result_ndr_lower_bandwidth_value"] / 1e9, axis=1
+        )
+        cov["Throughput_PDR"] = df.apply(
+            lambda row: row["result_pdr_lower_rate_value"] / 1e6, axis=1
+        )
+        cov["Throughput_PDR_Gbps"] = df.apply(
+            lambda row: row["result_pdr_lower_bandwidth_value"] / 1e9, axis=1
+        )
+    if show_latency:
+        for way in ("Forward", "Reverse"):
+            for pdr in (10, 50, 90):
+                for perc in (50, 90, 99):
+                    latency = f"result_latency_{way.lower()}_pdr_{pdr}_hdrh"
+                    cov[f"Latency {way} [us]_{pdr}% PDR_P{perc}"] = \
+                        df.apply(
+                            lambda row: _latency(row[latency], perc),
+                            axis=1
+                        )
 
     if csv:
         return cov
 
-    # Split data into tabels depending on the test suite.
-    for suite in cov["suite"].unique().tolist():
-        df_suite = pd.DataFrame(cov.loc[(cov["suite"] == suite)])
-        unit = df_suite["Throughput_Unit"].tolist()[0]
-        df_suite.rename(
-            columns={
-                "Throughput_NDR": f"Throughput_NDR_M{unit}",
-                "Throughput_PDR": f"Throughput_PDR_M{unit}"
-            },
-            inplace=True
-        )
-        df_suite.drop(["suite", "Throughput_Unit"], axis=1, inplace=True)
+    # Split data into tables depending on the test suite.
+    for suite in cov["Suite"].unique().tolist():
+        df_suite = pd.DataFrame(cov.loc[(cov["Suite"] == suite)])
+
+        if ttype !="device":
+            unit = df_suite["Throughput_Unit"].tolist()[0]
+            df_suite.rename(
+                columns={
+                    "Throughput_NDR": f"Throughput_NDR_M{unit}",
+                    "Throughput_PDR": f"Throughput_PDR_M{unit}",
+                    "Throughput_AVG": f"Throughput_G{unit}_AVG",
+                    "Throughput_STDEV": f"Throughput_G{unit}_STDEV"
+                },
+                inplace=True
+            )
+            df_suite.drop(["Suite", "Throughput_Unit"], axis=1, inplace=True)
+
         l_data.append((suite, df_suite, ))
-    return l_data
+
+    return l_data, ttype
 
 
-def coverage_tables(data: pd.DataFrame, selected: dict) -> list:
+def coverage_tables(
+    data: pd.DataFrame,
+    selected: dict,
+    show_latency: bool=True
+    ) -> list:
     """Generate an accordion with coverage tables.
 
     :param data: Coverage data.
     :param selected: Dictionary with user selection.
+    :param show_latency: If True, latency is displayed in the tables.
    :type data: pandas.DataFrame
    :type selected: dict
+    :type show_latency: bool
    :returns: Accordion with suite names (titles) and tables.
     :rtype: dash_bootstrap_components.Accordion
     """
 
     accordion_items = list()
-    for suite, cov_data in select_coverage_data(data, selected):
-        cols = list()
-        for idx, col in enumerate(cov_data.columns):
-            if idx == 0:
-                cols.append({
-                    "name": ["", "", col],
+    sel_data, ttype = \
+        select_coverage_data(data, selected, show_latency=show_latency)
+    for suite, cov_data in sel_data:
+        if ttype == "device": # VPP Device
+            cols = [
+                {
+                    "name": col,
                     "id": col,
                     "deletable": False,
                     "selectable": False,
                     "type": "text"
-                })
-            elif idx < 5:
-                cols.append({
-                    "name": col.split("_"),
-                    "id": col,
-                    "deletable": False,
-                    "selectable": False,
-                    "type": "numeric",
-                    "format": Format(precision=2, scheme=Scheme.fixed)
-                })
-            else:
-                cols.append({
-                    "name": col.split("_"),
-                    "id": col,
-                    "deletable": False,
-                    "selectable": False,
-                    "type": "numeric",
-                    "format": Format(precision=0, scheme=Scheme.fixed)
-                })
+                } for col in cov_data.columns
+            ]
+            style_cell={"textAlign": "left"}
+            style_cell_conditional=[
+                {
+                    "if": {"column_id": "Result"},
+                    "textAlign": "right"
+                }
+            ]
+        elif ttype == "mrr": # MRR
+            cols = list()
+            for idx, col in enumerate(cov_data.columns):
+                if idx == 0:
+                    cols.append({
+                        "name": ["", "", col],
+                        "id": col,
+                        "deletable": False,
+                        "selectable": False,
+                        "type": "text"
+                    })
+                else:
+                    cols.append({
+                        "name": col.split("_"),
+                        "id": col,
+                        "deletable": False,
+                        "selectable": False,
+                        "type": "numeric",
+                        "format": Format(precision=2, scheme=Scheme.fixed)
+                    })
+            style_cell={"textAlign": "right"}
+            style_cell_conditional=[
+                {
+                    "if": {"column_id": "Test Name"},
+                    "textAlign": "left"
+                }
+            ]
+        else: # Performance NDRPDR
+            cols = list()
+            for idx, col in enumerate(cov_data.columns):
+                if idx == 0:
+                    cols.append({
+                        "name": ["", "", col],
+                        "id": col,
+                        "deletable": False,
+                        "selectable": False,
+                        "type": "text"
+                    })
+                elif idx < 5:
+                    cols.append({
+                        "name": col.split("_"),
+                        "id": col,
+                        "deletable": False,
+                        "selectable": False,
+                        "type": "numeric",
+                        "format": Format(precision=2, scheme=Scheme.fixed)
+                    })
+                else:
+                    cols.append({
+                        "name": col.split("_"),
+                        "id": col,
+                        "deletable": False,
+                        "selectable": False,
+                        "type": "numeric",
+                        "format": Format(precision=0, scheme=Scheme.fixed)
+                    })
+            style_cell={"textAlign": "right"}
+            style_cell_conditional=[
+                {
+                    "if": {"column_id": "Test Name"},
+                    "textAlign": "left"
+                }
+            ]
 
         accordion_items.append(
             dbc.AccordionItem(
@@ -260,25 +283,21 @@ def coverage_tables(data: pd.DataFrame, selected: dict) -> list:
                     columns=cols,
                     data=cov_data.to_dict("records"),
                     merge_duplicate_headers=True,
-                    editable=True,
+                    editable=False,
                     filter_action="none",
                     sort_action="native",
                     sort_mode="multi",
                     selected_columns=[],
                     selected_rows=[],
                     page_action="none",
-                    style_cell={"textAlign": "right"},
-                    style_cell_conditional=[{
-                        "if": {"column_id": "Test Name"},
-                        "textAlign": "left"
-                    }]
+                    style_cell=style_cell,
+                    style_cell_conditional=style_cell_conditional
                 )
            )
        )
-    return dbc.Accordion(
-        children=accordion_items,
-        class_name="gy-2 p-0",
-        start_collapsed=True,
-        always_open=True
-    )
+    return dbc.Accordion(
+        children=accordion_items,
+        class_name="gy-1 p-0",
+        start_collapsed=True,
+        always_open=True
+    )
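Note on the refactoring: the nested for-loops added in select_coverage_data() generate exactly the latency column names that the deleted per-column statements spelled out, and col.split("_") is what turns each name into the three header rows that merge_duplicate_headers=True groups in the Dash DataTable. A small standalone illustration (plain Python, not part of the patch; the pps rate unit is only an example):

    # Print every generated latency column name and its three header rows.
    for way in ("Forward", "Reverse"):
        for pdr in (10, 50, 90):
            for perc in (50, 90, 99):
                name = f"Latency {way} [us]_{pdr}% PDR_P{perc}"
                print(name, "->", name.split("_"))
    # First entry:
    #   Latency Forward [us]_10% PDR_P50
    #   -> ['Latency Forward [us]', '10% PDR', 'P50']
    # Throughput columns split the same way, e.g. with a pps rate unit
    # "Throughput_NDR_Mpps".split("_") -> ['Throughput', 'NDR', 'Mpps'],
    # so repeated top cells such as "Throughput" merge into one header group.
    # A caller can also hide the latency columns entirely, e.g.
    # coverage_tables(data, selected, show_latency=False).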