C-Dash: Add regexp filtering to comparison tables
[csit.git] csit.infra.dash/app/cdash/coverage/tables.py
# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The coverage data tables.
"""

from typing import Optional

import hdrh.histogram
import hdrh.codec
import pandas as pd
import dash_bootstrap_components as dbc

from dash import dash_table
from dash.dash_table.Format import Format, Scheme

from ..utils.constants import Constants as C


def select_coverage_data(
        data: pd.DataFrame,
        selected: dict,
        csv: bool = False
    ) -> list:
    """Select coverage data for the tables and generate tables as pandas data
    frames.

    :param data: Coverage data.
    :param selected: Dictionary with user selection.
    :param csv: If True, pandas data frame with selected coverage data is
        returned for "Download Data" feature.
    :type data: pandas.DataFrame
    :type selected: dict
    :type csv: bool
    :returns: List of tuples with suite name (str) and data (pandas dataframe)
        or pandas dataframe if csv is True.
    :rtype: list[tuple[str, pandas.DataFrame]] or pandas.DataFrame
    """
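
    # Hedged usage sketch (hypothetical selection values; the real caller is
    # the coverage layout callback):
    #
    #     selection = {
    #         "phy": "2n-clx-xxv710-avf", "area": "ip4", "dut": "vpp",
    #         "dutver": "23.02-release", "rls": "rls2302"
    #     }
    #     suite_tables = select_coverage_data(data, selection)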

    l_data = list()

    # Filter data selected by the user.
    phy = selected["phy"].split("-")
    if len(phy) == 4:
        topo, arch, nic, drv = phy
        drv = "" if drv == "dpdk" else drv.replace("_", "-")
    else:
        return l_data
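
    # For illustration, the hypothetical "phy" above, "2n-clx-xxv710-avf",
    # splits into topo="2n", arch="clx", nic="xxv710", drv="avf". A "dpdk"
    # driver is mapped to an empty string, as dpdk tests carry no driver tag
    # in their test IDs, while underscores ("af_xdp") become dashes.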

    df = pd.DataFrame(data.loc[(
        (data["passed"] == True) &
        (data["dut_type"] == selected["dut"]) &
        (data["dut_version"] == selected["dutver"]) &
        (data["release"] == selected["rls"])
    )])
    df = df[
        (df.job.str.endswith(f"{topo}-{arch}")) &
        (df.test_id.str.contains(
            rf"^.*\.{selected['area']}\..*{nic}.*{drv}.*$",
            regex=True
        ))
    ]
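    # Hypothetical example: with area "ip4" and the selection above, a
    # test_id like "tests.vpp.perf.ip4.2n1l-25ge2p1xxv710-avf-ethip4-
    # ip4base-ndrpdr.64b-2t1c-ethip4-ip4base-ndrpdr" passes the regexp,
    # since it contains the area, the NIC name and the driver tag in order.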
    # The "dpdk" driver was mapped to an empty string above; it is the
    # default driver, so in that case drop the tests tagged with any
    # explicit driver name.
    if drv == "":
        for driver in C.DRIVERS:
            df.drop(
                df[df.test_id.str.contains(f"-{driver}-")].index,
                inplace=True
            )

    # Guard against an empty selection; indexing below would raise otherwise.
    if df.empty:
        return l_data

    ttype = df["test_type"].to_list()[0]

    # Prepare the coverage data
    def _laten(hdrh_string: str, percentile: float) -> Optional[int]:
        """Get latency from HDRH string for given percentile.

        :param hdrh_string: Encoded HDRH string.
        :param percentile: Given percentile.
        :type hdrh_string: str
        :type percentile: float
        :returns: The latency value for the given percentile from the encoded
            HDRH string, or None if the string cannot be decoded.
        :rtype: int or None
        """
        try:
            hdr_lat = hdrh.histogram.HdrHistogram.decode(hdrh_string)
            return hdr_lat.get_value_at_percentile(percentile)
        except (hdrh.codec.HdrLengthException, TypeError):
            return None
    def _get_suite(test_id: str) -> str:
        """Get the suite name from the test ID.
        """
        return (
            test_id.split(".")[-2]
            .replace("2n1l-", "")
            .replace("1n1l-", "")
            .replace("2n-", "")
            .replace("-ndrpdr", "")
        )

    def _get_test(test_id: str) -> str:
        """Get the test name from the test ID.
        """
        return test_id.split(".")[-1].replace("-ndrpdr", "")
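
    # Hypothetical example: for the test_id used above, _get_suite() returns
    # "25ge2p1xxv710-avf-ethip4-ip4base" (suite part with prefixes and
    # "-ndrpdr" stripped) and _get_test() returns "64b-2t1c-ethip4-ip4base".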

    cov = pd.DataFrame()
    cov["Suite"] = df.apply(lambda row: _get_suite(row["test_id"]), axis=1)
    cov["Test Name"] = df.apply(lambda row: _get_test(row["test_id"]), axis=1)

    if ttype == "device":
        cov = cov.assign(Result="PASS")
    else:
        cov["Throughput_Unit"] = df["result_pdr_lower_rate_unit"]
        cov["Throughput_NDR"] = df.apply(
            lambda row: row["result_ndr_lower_rate_value"] / 1e6, axis=1
        )
        cov["Throughput_NDR_Gbps"] = df.apply(
            lambda row: row["result_ndr_lower_bandwidth_value"] / 1e9, axis=1
        )
        cov["Throughput_PDR"] = df.apply(
            lambda row: row["result_pdr_lower_rate_value"] / 1e6, axis=1
        )
        cov["Throughput_PDR_Gbps"] = df.apply(
            lambda row: row["result_pdr_lower_bandwidth_value"] / 1e9, axis=1
        )
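
        # Latency columns: percentiles P50/P90/P99 read from the encoded HDRH
        # histograms measured at 10%, 50% and 90% of the PDR load, for both
        # traffic directions; values are in microseconds.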
        cov["Latency Forward [us]_10% PDR_P50"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_10_hdrh"], 50.0),
            axis=1
        )
        cov["Latency Forward [us]_10% PDR_P90"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_10_hdrh"], 90.0),
            axis=1
        )
        cov["Latency Forward [us]_10% PDR_P99"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_10_hdrh"], 99.0),
            axis=1
        )
        cov["Latency Forward [us]_50% PDR_P50"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_50_hdrh"], 50.0),
            axis=1
        )
        cov["Latency Forward [us]_50% PDR_P90"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_50_hdrh"], 90.0),
            axis=1
        )
        cov["Latency Forward [us]_50% PDR_P99"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_50_hdrh"], 99.0),
            axis=1
        )
        cov["Latency Forward [us]_90% PDR_P50"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_90_hdrh"], 50.0),
            axis=1
        )
        cov["Latency Forward [us]_90% PDR_P90"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_90_hdrh"], 90.0),
            axis=1
        )
        cov["Latency Forward [us]_90% PDR_P99"] = df.apply(
            lambda row: _laten(row["result_latency_forward_pdr_90_hdrh"], 99.0),
            axis=1
        )
        cov["Latency Reverse [us]_10% PDR_P50"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_10_hdrh"], 50.0),
            axis=1
        )
        cov["Latency Reverse [us]_10% PDR_P90"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_10_hdrh"], 90.0),
            axis=1
        )
        cov["Latency Reverse [us]_10% PDR_P99"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_10_hdrh"], 99.0),
            axis=1
        )
        cov["Latency Reverse [us]_50% PDR_P50"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_50_hdrh"], 50.0),
            axis=1
        )
        cov["Latency Reverse [us]_50% PDR_P90"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_50_hdrh"], 90.0),
            axis=1
        )
        cov["Latency Reverse [us]_50% PDR_P99"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_50_hdrh"], 99.0),
            axis=1
        )
        cov["Latency Reverse [us]_90% PDR_P50"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_90_hdrh"], 50.0),
            axis=1
        )
        cov["Latency Reverse [us]_90% PDR_P90"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_90_hdrh"], 90.0),
            axis=1
        )
        cov["Latency Reverse [us]_90% PDR_P99"] = df.apply(
            lambda row: _laten(row["result_latency_reverse_pdr_90_hdrh"], 99.0),
            axis=1
        )

    if csv:
        return cov

    # Split data into tables depending on the test suite.
    for suite in cov["Suite"].unique().tolist():
        df_suite = pd.DataFrame(cov.loc[(cov["Suite"] == suite)])

        if ttype != "device":
            unit = df_suite["Throughput_Unit"].tolist()[0]
            df_suite.rename(
                columns={
                    "Throughput_NDR": f"Throughput_NDR_M{unit}",
                    "Throughput_PDR": f"Throughput_PDR_M{unit}"
                },
                inplace=True
            )
            df_suite.drop(["Suite", "Throughput_Unit"], axis=1, inplace=True)

        l_data.append((suite, df_suite, ))

    return l_data


def coverage_tables(data: pd.DataFrame, selected: dict) -> dbc.Accordion:
    """Generate an accordion with coverage tables.

    :param data: Coverage data.
    :param selected: Dictionary with user selection.
    :type data: pandas.DataFrame
    :type selected: dict
    :returns: Accordion with suite names (titles) and tables.
    :rtype: dash_bootstrap_components.Accordion
    """

    accordion_items = list()
    for suite, cov_data in select_coverage_data(data, selected):
        if len(cov_data.columns) == 3:  # VPP Device
            cols = [
                {
                    "name": col,
                    "id": col,
                    "deletable": False,
                    "selectable": False,
                    "type": "text"
                } for col in cov_data.columns
            ]
            style_cell = {"textAlign": "left"}
            style_cell_conditional = [
                {
                    "if": {"column_id": "Result"},
                    "textAlign": "right"
                }
            ]
        else:  # Performance
            cols = list()
            for idx, col in enumerate(cov_data.columns):
                if idx == 0:
                    cols.append({
                        "name": ["", "", col],
                        "id": col,
                        "deletable": False,
                        "selectable": False,
                        "type": "text"
                    })
                elif idx < 5:
                    cols.append({
                        "name": col.split("_"),
                        "id": col,
                        "deletable": False,
                        "selectable": False,
                        "type": "numeric",
                        "format": Format(precision=2, scheme=Scheme.fixed)
                    })
                else:
                    cols.append({
                        "name": col.split("_"),
                        "id": col,
                        "deletable": False,
                        "selectable": False,
                        "type": "numeric",
                        "format": Format(precision=0, scheme=Scheme.fixed)
                    })
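
            # Example of the multi-level header produced by splitting column
            # names on "_": "Latency Forward [us]_50% PDR_P90" becomes three
            # stacked header rows, "Latency Forward [us]" / "50% PDR" / "P90",
            # merged with neighbouring duplicates by merge_duplicate_headers.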
            style_cell = {"textAlign": "right"}
            style_cell_conditional = [
                {
                    "if": {"column_id": "Test Name"},
                    "textAlign": "left"
                }
            ]

        accordion_items.append(
            dbc.AccordionItem(
                title=suite,
                children=dash_table.DataTable(
                    columns=cols,
                    data=cov_data.to_dict("records"),
                    merge_duplicate_headers=True,
                    editable=False,
                    filter_action="none",
                    sort_action="native",
                    sort_mode="multi",
                    selected_columns=[],
                    selected_rows=[],
                    page_action="none",
                    style_cell=style_cell,
                    style_cell_conditional=style_cell_conditional
                )
            )
        )
    return dbc.Accordion(
        children=accordion_items,
        class_name="gy-1 p-0",
        start_collapsed=True,
        always_open=True
    )
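
# A minimal, hypothetical wiring sketch (not part of this module; in C-Dash
# the accordion is produced by the coverage layout callbacks). Here
# "coverage_data" and "selection" are placeholder names:
#
#     from dash import Dash
#
#     app = Dash(__name__)
#     app.layout = coverage_tables(coverage_data, selection)
#     app.run()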