Report: Fix typo in specification.
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
30 import pandas as pd
31
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
34
35 from pal_utils import mean, stdev, classify_anomalies, \
36     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
37
38
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
40
41
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to its generator function
    according to the table's ``algorithm`` field. A failure of one table
    is logged and does not stop the generation of the remaining tables.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name (from the spec) -> generator function.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                # The weekly comparison needs the testbed mapping from
                # the environment section of the specification.
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # An unknown algorithm raises KeyError from the dispatch dict
            # (not NameError), so both must be caught for the intended
            # error message to be logged.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
76
77
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: table_oper_data_html
    specified in the specification file.

    Writes one ``.rst`` file per suite; each file embeds raw HTML tables
    with operational (``show-run``) data for the suite's tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests by index; direction comes from the spec.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: blue header rows, white spacer rows and two
        # alternating light-blue shades for data rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row below the title.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No show-run data captured for this test: emit a short
        # "No Data" table and return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Invisible (white, tiny) dot as a bottom spacer.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers repeated for every worker thread section.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, one sub-section per thread.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread title: thread 0 is the VPP main thread.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Header row: first column left-aligned, the rest right.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background shades.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Invisible (white, tiny) dot as a bottom spacer.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to each suite and
    # write them into one rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
280
281
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the columns defined by the
    specification; cell text is escaped/decorated for later rst
    rendering (``|br|``, ``|prein|``, ``|preout|`` substitutions).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests; direction comes from the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables. Embedded double quotes are
    # doubled (CSV escaping) and each title is wrapped in quotes.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] looks like u"data <key>"; the
                    # second word selects the field of the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are broken roughly in half at a
                        # dash boundary so they fit the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # A skipped column (u"Test Failed" above) leaves the row
            # shorter than the header; such rows are dropped entirely.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
376
377
378 def _tpc_modify_test_name(test_name, ignore_nic=False):
379     """Modify a test name by replacing its parts.
380
381     :param test_name: Test name to be modified.
382     :param ignore_nic: If True, NIC is removed from TC name.
383     :type test_name: str
384     :type ignore_nic: bool
385     :returns: Modified test name.
386     :rtype: str
387     """
388     test_name_mod = test_name.\
389         replace(u"-ndrpdrdisc", u""). \
390         replace(u"-ndrpdr", u"").\
391         replace(u"-pdrdisc", u""). \
392         replace(u"-ndrdisc", u"").\
393         replace(u"-pdr", u""). \
394         replace(u"-ndr", u""). \
395         replace(u"1t1c", u"1c").\
396         replace(u"2t1c", u"1c"). \
397         replace(u"2t2c", u"2c").\
398         replace(u"4t2c", u"2c"). \
399         replace(u"4t4c", u"4c").\
400         replace(u"8t4c", u"4c")
401
402     if ignore_nic:
403         return re.sub(REGEX_NIC, u"", test_name_mod)
404     return test_name_mod
405
406
407 def _tpc_modify_displayed_test_name(test_name):
408     """Modify a test name which is displayed in a table by replacing its parts.
409
410     :param test_name: Test name to be modified.
411     :type test_name: str
412     :returns: Modified test name.
413     :rtype: str
414     """
415     return test_name.\
416         replace(u"1t1c", u"1c").\
417         replace(u"2t1c", u"1c"). \
418         replace(u"2t2c", u"2c").\
419         replace(u"4t2c", u"2c"). \
420         replace(u"4t4c", u"4c").\
421         replace(u"8t4c", u"4c")
422
423
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: dict
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            # MRR results carry a pre-computed mean/stdev pair which is
            # stored directly instead of being appended to u"data".
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests == u"PDR":
            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
    except (KeyError, TypeError):
        # Missing or malformed results are deliberately skipped; callers
        # treat absent entries as "no result for this test".
        pass
444
445
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Index of the u"Test Case" column; it is also used to select the
    # alignment/width preset below, so it is presumably always 0..2 for
    # the headers passed in -- TODO confirm against callers.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    # Layout presets for 2-, 3- and 4-column tables, indexed by ``idx``.
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One DataFrame per column, sorted by that column ascending,
        # followed by one per column sorted descending. Ties are broken
        # by the u"Test Case" column.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        # NOTE: in this branch df_sorted is a single DataFrame, not a
        # list; the two cases are handled by separate branches below.
        df_sorted = df_data

    # Alternating row background colors (zebra striping).
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One trace per sort order; a dropdown menu toggles visibility.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Dropdown entries mirror the trace order: all columns ascending
        # first, then all columns descending.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table trace.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    # The standalone html file; the rst wrapper below embeds it.
    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )
        # Legend / footnote lose their leading newline and use |br|
        # instead of literal newlines inside the rst output.
        if legend:
            rst_file.write(legend[1:].replace(u"\n", u" |br| "))
        if footnote:
            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
618
619
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Collects SOAK test results, pairs them with the corresponding
    NDR/PDR/MRR results and writes a comparison table (csv, txt and
    html) with means, standard deviations and the relative change.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Fixed typo: the opening parenthesis after "Stdev" was
            # missing, producing e.g. "StdevSOAK)" in the header.
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
            u":END"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC parsed
                        # from the parent suite name.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # Result not present for this run; skip it.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests which also have a SOAK result are kept.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        # MRR: (mean, stdev) tuple, stored as-is.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build the table rows: name, ref mean/stdev, cmp mean/stdev,
    # relative change and its stdev. Rates are converted to Mpps.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                # MRR stores a single (mean, stdev) tuple.
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Rows with both sides present get Diff and Stdev(Diff); rows
        # missing either side are left out of the table.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                # round() raises ValueError for NaN; keep the raw value.
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
        txt_file.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
799
800
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table; tbl_dict maps a test name to its displayed
    # name (prefixed with the NIC) and an ordered build --> receive-rate map.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # No valid sample in the long-term part of the window.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend value vs the value win_size ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last trend value vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete trending data. This single check
            # also covers the case when both relative changes are NaN, so
            # no separate test for that combination is needed.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Sort the table: most regressions first, then most progressions,
    # then the biggest short-term change within each group.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
920
921
922 def _generate_url(testbed, test_name):
923     """Generate URL to a trending plot from the name of the test case.
924
925     :param testbed: The testbed used for testing.
926     :param test_name: The name of the test case.
927     :type testbed: str
928     :type test_name: str
929     :returns: The URL to the plot with the trending data for the given test
930         case.
931     :rtype str
932     """
933
934     if u"x520" in test_name:
935         nic = u"x520"
936     elif u"x710" in test_name:
937         nic = u"x710"
938     elif u"xl710" in test_name:
939         nic = u"xl710"
940     elif u"xxv710" in test_name:
941         nic = u"xxv710"
942     elif u"vic1227" in test_name:
943         nic = u"vic1227"
944     elif u"vic1385" in test_name:
945         nic = u"vic1385"
946     elif u"x553" in test_name:
947         nic = u"x553"
948     elif u"cx556" in test_name or u"cx556a" in test_name:
949         nic = u"cx556a"
950     else:
951         nic = u""
952
953     if u"64b" in test_name:
954         frame_size = u"64b"
955     elif u"78b" in test_name:
956         frame_size = u"78b"
957     elif u"imix" in test_name:
958         frame_size = u"imix"
959     elif u"9000b" in test_name:
960         frame_size = u"9000b"
961     elif u"1518b" in test_name:
962         frame_size = u"1518b"
963     elif u"114b" in test_name:
964         frame_size = u"114b"
965     else:
966         frame_size = u""
967
968     if u"1t1c" in test_name or \
969         (u"-1c-" in test_name and
970          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
971         cores = u"1t1c"
972     elif u"2t2c" in test_name or \
973          (u"-2c-" in test_name and
974           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
975         cores = u"2t2c"
976     elif u"4t4c" in test_name or \
977          (u"-4c-" in test_name and
978           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
979         cores = u"4t4c"
980     elif u"2t1c" in test_name or \
981          (u"-1c-" in test_name and
982           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
983         cores = u"2t1c"
984     elif u"4t2c" in test_name or \
985          (u"-2c-" in test_name and
986           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
987         cores = u"4t2c"
988     elif u"8t4c" in test_name or \
989          (u"-4c-" in test_name and
990           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
991         cores = u"8t4c"
992     else:
993         cores = u""
994
995     if u"testpmd" in test_name:
996         driver = u"testpmd"
997     elif u"l3fwd" in test_name:
998         driver = u"l3fwd"
999     elif u"avf" in test_name:
1000         driver = u"avf"
1001     elif u"rdma" in test_name:
1002         driver = u"rdma"
1003     elif u"dnv" in testbed or u"tsh" in testbed:
1004         driver = u"ixgbe"
1005     else:
1006         driver = u"dpdk"
1007
1008     if u"acl" in test_name or \
1009             u"macip" in test_name or \
1010             u"nat" in test_name or \
1011             u"policer" in test_name or \
1012             u"cop" in test_name:
1013         bsf = u"features"
1014     elif u"scale" in test_name:
1015         bsf = u"scale"
1016     elif u"base" in test_name:
1017         bsf = u"base"
1018     else:
1019         bsf = u"base"
1020
1021     if u"114b" in test_name and u"vhost" in test_name:
1022         domain = u"vts"
1023     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1024         domain = u"dpdk"
1025     elif u"memif" in test_name:
1026         domain = u"container_memif"
1027     elif u"srv6" in test_name:
1028         domain = u"srv6"
1029     elif u"vhost" in test_name:
1030         domain = u"vhost"
1031         if u"vppl2xc" in test_name:
1032             driver += u"-vpp"
1033         else:
1034             driver += u"-testpmd"
1035         if u"lbvpplacp" in test_name:
1036             bsf += u"-link-bonding"
1037     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1038         domain = u"nf_service_density_vnfc"
1039     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1040         domain = u"nf_service_density_cnfc"
1041     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1042         domain = u"nf_service_density_cnfp"
1043     elif u"ipsec" in test_name:
1044         domain = u"ipsec"
1045         if u"sw" in test_name:
1046             bsf += u"-sw"
1047         elif u"hw" in test_name:
1048             bsf += u"-hw"
1049     elif u"ethip4vxlan" in test_name:
1050         domain = u"ip4_tunnels"
1051     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1052         domain = u"ip4"
1053     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1054         domain = u"ip6"
1055     elif u"l2xcbase" in test_name or \
1056             u"l2xcscale" in test_name or \
1057             u"l2bdbasemaclrn" in test_name or \
1058             u"l2bdscale" in test_name or \
1059             u"l2patch" in test_name:
1060         domain = u"l2"
1061     else:
1062         domain = u""
1063
1064     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1065     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1066
1067     return file_name + anchor_name
1068
1069
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the source csv file; bail out if it is missing or unreadable.
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row, built from the first csv line:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, cell in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = cell

    # Alternating background colours per anomaly classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(rows[1:]):
        # Column 4 holds the nr of regressions, column 5 the progressions.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        data_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][row_nr % 2])
        )

        # Data cells:
        for col_nr, cell in enumerate(row):
            data_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The first column links the test name to its trending plot.
                anchor = ET.SubElement(
                    data_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), cell)}"
                    )
                )
                anchor.text = cell
            else:
                data_cell.text = cell
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1170
1171
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # One output line per item: build, version, pass/fail counts, then the
    # names of the failed tests.
    lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            lines.append(build)
            lines.append(version)
            failed_names = list()
            nr_passed = 0
            nr_failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    nr_passed += 1
                    continue
                nr_failed += 1
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    continue
                failed_names.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            lines.append(str(nr_passed))
            lines.append(str(nr_failed))
            lines.extend(failed_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for line in lines:
            file_handler.write(line + u'\n')
1232
1233
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7) are
    # taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # First occurrence of this test: extract the NIC name
                    # from the parent suite to prefix the displayed name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Store (status, timestamp, vpp version, csit build)
                        # for each build inside the time window.
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count the failures and remember the metadata of the last one
        # (builds are iterated in insertion order).
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by the date of the last failure (newest first), then group by
    # the number of failures in descending order.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1341
1342
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the source csv file; bail out if it is missing or unreadable.
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row, built from the first csv line:
    hdr_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, cell in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = cell

    # Data rows with alternating background colours:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        # Data cells:
        for col_nr, cell in enumerate(row):
            data_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The first column links the test name to its trending plot.
                anchor = ET.SubElement(
                    data_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), cell)}"
                    )
                )
                anchor.text = cell
            else:
                data_cell.text = cell
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1424
1425
1426 def table_comparison(table, input_data):
1427     """Generate the table(s) with algorithm: table_comparison
1428     specified in the specification file.
1429
1430     :param table: Table to generate.
1431     :param input_data: Data to process.
1432     :type table: pandas.Series
1433     :type input_data: InputData
1434     """
1435     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1436
1437     # Transform the data
1438     logging.info(
1439         f"    Creating the data set for the {table.get(u'type', u'')} "
1440         f"{table.get(u'title', u'')}."
1441     )
1442
1443     columns = table.get(u"columns", None)
1444     if not columns:
1445         logging.error(
1446             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1447         )
1448         return
1449
1450     cols = list()
1451     for idx, col in enumerate(columns):
1452         if col.get(u"data-set", None) is None:
1453             logging.warning(f"No data for column {col.get(u'title', u'')}")
1454             continue
1455         tag = col.get(u"tag", None)
1456         data = input_data.filter_data(
1457             table,
1458             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1459             data=col[u"data-set"],
1460             continue_on_error=True
1461         )
1462         col_data = {
1463             u"title": col.get(u"title", f"Column{idx}"),
1464             u"data": dict()
1465         }
1466         for builds in data.values:
1467             for build in builds:
1468                 for tst_name, tst_data in build.items():
1469                     if tag and tag not in tst_data[u"tags"]:
1470                         continue
1471                     tst_name_mod = \
1472                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1473                         replace(u"2n1l-", u"")
1474                     if col_data[u"data"].get(tst_name_mod, None) is None:
1475                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1476                         if u"across testbeds" in table[u"title"].lower() or \
1477                                 u"across topologies" in table[u"title"].lower():
1478                             name = _tpc_modify_displayed_test_name(name)
1479                         col_data[u"data"][tst_name_mod] = {
1480                             u"name": name,
1481                             u"replace": True,
1482                             u"data": list(),
1483                             u"mean": None,
1484                             u"stdev": None
1485                         }
1486                     _tpc_insert_data(
1487                         target=col_data[u"data"][tst_name_mod],
1488                         src=tst_data,
1489                         include_tests=table[u"include-tests"]
1490                     )
1491
1492         replacement = col.get(u"data-replacement", None)
1493         if replacement:
1494             rpl_data = input_data.filter_data(
1495                 table,
1496                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1497                 data=replacement,
1498                 continue_on_error=True
1499             )
1500             for builds in rpl_data.values:
1501                 for build in builds:
1502                     for tst_name, tst_data in build.items():
1503                         if tag and tag not in tst_data[u"tags"]:
1504                             continue
1505                         tst_name_mod = \
1506                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1507                             replace(u"2n1l-", u"")
1508                         if col_data[u"data"].get(tst_name_mod, None) is None:
1509                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1510                             if u"across testbeds" in table[u"title"].lower() \
1511                                     or u"across topologies" in \
1512                                     table[u"title"].lower():
1513                                 name = _tpc_modify_displayed_test_name(name)
1514                             col_data[u"data"][tst_name_mod] = {
1515                                 u"name": name,
1516                                 u"replace": False,
1517                                 u"data": list(),
1518                                 u"mean": None,
1519                                 u"stdev": None
1520                             }
1521                         if col_data[u"data"][tst_name_mod][u"replace"]:
1522                             col_data[u"data"][tst_name_mod][u"replace"] = False
1523                             col_data[u"data"][tst_name_mod][u"data"] = list()
1524                         _tpc_insert_data(
1525                             target=col_data[u"data"][tst_name_mod],
1526                             src=tst_data,
1527                             include_tests=table[u"include-tests"]
1528                         )
1529
1530         if table[u"include-tests"] in (u"NDR", u"PDR"):
1531             for tst_name, tst_data in col_data[u"data"].items():
1532                 if tst_data[u"data"]:
1533                     tst_data[u"mean"] = mean(tst_data[u"data"])
1534                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1535
1536         cols.append(col_data)
1537
1538     tbl_dict = dict()
1539     for col in cols:
1540         for tst_name, tst_data in col[u"data"].items():
1541             if tbl_dict.get(tst_name, None) is None:
1542                 tbl_dict[tst_name] = {
1543                     "name": tst_data[u"name"]
1544                 }
1545             tbl_dict[tst_name][col[u"title"]] = {
1546                 u"mean": tst_data[u"mean"],
1547                 u"stdev": tst_data[u"stdev"]
1548             }
1549
1550     if not tbl_dict:
1551         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1552         return
1553
1554     tbl_lst = list()
1555     for tst_data in tbl_dict.values():
1556         row = [tst_data[u"name"], ]
1557         for col in cols:
1558             row.append(tst_data.get(col[u"title"], None))
1559         tbl_lst.append(row)
1560
1561     comparisons = table.get(u"comparisons", None)
1562     if comparisons and isinstance(comparisons, list):
1563         for idx, comp in enumerate(comparisons):
1564             try:
1565                 col_ref = int(comp[u"reference"])
1566                 col_cmp = int(comp[u"compare"])
1567             except KeyError:
1568                 logging.warning(u"Comparison: No references defined! Skipping.")
1569                 comparisons.pop(idx)
1570                 continue
1571             if not (0 < col_ref <= len(cols) and
1572                     0 < col_cmp <= len(cols)) or \
1573                     col_ref == col_cmp:
1574                 logging.warning(f"Wrong values of reference={col_ref} "
1575                                 f"and/or compare={col_cmp}. Skipping.")
1576                 comparisons.pop(idx)
1577                 continue
1578
1579     tbl_cmp_lst = list()
1580     if comparisons:
1581         for row in tbl_lst:
1582             new_row = deepcopy(row)
1583             add_to_tbl = False
1584             for comp in comparisons:
1585                 ref_itm = row[int(comp[u"reference"])]
1586                 if ref_itm is None and \
1587                         comp.get(u"reference-alt", None) is not None:
1588                     ref_itm = row[int(comp[u"reference-alt"])]
1589                 cmp_itm = row[int(comp[u"compare"])]
1590                 if ref_itm is not None and cmp_itm is not None and \
1591                         ref_itm[u"mean"] is not None and \
1592                         cmp_itm[u"mean"] is not None and \
1593                         ref_itm[u"stdev"] is not None and \
1594                         cmp_itm[u"stdev"] is not None:
1595                     delta, d_stdev = relative_change_stdev(
1596                         ref_itm[u"mean"], cmp_itm[u"mean"],
1597                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
1598                     )
1599                     new_row.append(
1600                         {
1601                             u"mean": delta * 1e6,
1602                             u"stdev": d_stdev * 1e6
1603                         }
1604                     )
1605                     add_to_tbl = True
1606                 else:
1607                     new_row.append(None)
1608             if add_to_tbl:
1609                 tbl_cmp_lst.append(new_row)
1610
1611     tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1612     tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1613
1614     rcas = list()
1615     rca_in = table.get(u"rca", None)
1616     if rca_in and isinstance(rca_in, list):
1617         for idx, itm in enumerate(rca_in):
1618             try:
1619                 with open(itm.get(u"data", u""), u"r") as rca_file:
1620                     rcas.append(
1621                         {
1622                             u"title": itm.get(u"title", f"RCA{idx}"),
1623                             u"data": load(rca_file, Loader=FullLoader)
1624                         }
1625                     )
1626             except (YAMLError, IOError) as err:
1627                 logging.warning(
1628                     f"The RCA file {itm.get(u'data', u'')} does not exist or "
1629                     f"it is corrupted!"
1630                 )
1631                 logging.debug(repr(err))
1632
1633     tbl_for_csv = list()
1634     for line in tbl_cmp_lst:
1635         row = [line[0], ]
1636         for idx, itm in enumerate(line[1:]):
1637             if itm is None:
1638                 row.append(u"NT")
1639                 row.append(u"NT")
1640             else:
1641                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1642                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1643         for rca in rcas:
1644             rca_nr = rca[u"data"].get(row[0], u"-")
1645             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1646         tbl_for_csv.append(row)
1647
1648     header_csv = [u"Test Case", ]
1649     for col in cols:
1650         header_csv.append(f"Avg({col[u'title']})")
1651         header_csv.append(f"Stdev({col[u'title']})")
1652     for comp in comparisons:
1653         header_csv.append(
1654             f"Avg({comp.get(u'title', u'')})"
1655         )
1656         header_csv.append(
1657             f"Stdev({comp.get(u'title', u'')})"
1658         )
1659     header_csv.extend([rca[u"title"] for rca in rcas])
1660
1661     legend_lst = table.get(u"legend", None)
1662     if legend_lst is None:
1663         legend = u""
1664     else:
1665         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1666
1667     footnote = u""
1668     for rca in rcas:
1669         footnote += f"\n{rca[u'title']}:\n"
1670         footnote += rca[u"data"].get(u"footnote", u"")
1671
1672     csv_file = f"{table[u'output-file']}-csv.csv"
1673     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
1674         file_handler.write(
1675             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1676         )
1677         for test in tbl_for_csv:
1678             file_handler.write(
1679                 u",".join([f'"{item}"' for item in test]) + u"\n"
1680             )
1681         if legend_lst:
1682             for item in legend_lst:
1683                 file_handler.write(f'"{item}"\n')
1684         if footnote:
1685             for itm in footnote.split(u"\n"):
1686                 file_handler.write(f'"{itm}"\n')
1687
1688     tbl_tmp = list()
1689     max_lens = [0, ] * len(tbl_cmp_lst[0])
1690     for line in tbl_cmp_lst:
1691         row = [line[0], ]
1692         for idx, itm in enumerate(line[1:]):
1693             if itm is None:
1694                 new_itm = u"NT"
1695             else:
1696                 if idx < len(cols):
1697                     new_itm = (
1698                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
1699                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1700                         replace(u"nan", u"NaN")
1701                     )
1702                 else:
1703                     new_itm = (
1704                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1705                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1706                         replace(u"nan", u"NaN")
1707                     )
1708             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1709                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1710             row.append(new_itm)
1711
1712         tbl_tmp.append(row)
1713
1714     tbl_final = list()
1715     for line in tbl_tmp:
1716         row = [line[0], ]
1717         for idx, itm in enumerate(line[1:]):
1718             if itm in (u"NT", u"NaN"):
1719                 row.append(itm)
1720                 continue
1721             itm_lst = itm.rsplit(u"\u00B1", 1)
1722             itm_lst[-1] = \
1723                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1724             row.append(u"\u00B1".join(itm_lst))
1725         for rca in rcas:
1726             rca_nr = rca[u"data"].get(row[0], u"-")
1727             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1728
1729         tbl_final.append(row)
1730
1731     header = [u"Test Case", ]
1732     header.extend([col[u"title"] for col in cols])
1733     header.extend([comp.get(u"title", u"") for comp in comparisons])
1734     header.extend([rca[u"title"] for rca in rcas])
1735
1736     # Generate csv tables:
1737     csv_file = f"{table[u'output-file']}.csv"
1738     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
1739         file_handler.write(u";".join(header) + u"\n")
1740         for test in tbl_final:
1741             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1742
1743     # Generate txt table:
1744     txt_file_name = f"{table[u'output-file']}.txt"
1745     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1746
1747     with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
1748         txt_file.write(legend)
1749         txt_file.write(footnote)
1750         if legend or footnote:
1751             txt_file.write(u"\n:END")
1752
1753     # Generate html table:
1754     _tpc_generate_html_table(
1755         header,
1756         tbl_final,
1757         table[u'output-file'],
1758         legend=legend,
1759         footnote=footnote,
1760         sort_data=False,
1761         title=table.get(u"title", u"")
1762     )
1763
1764
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Builds a table with one row per test and ``nr-of-data-columns`` data
    columns (one per processed build), followed by one "DiffN" column per
    configured comparison. The table is written as CSV, pretty TXT and HTML.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR or PDR lower-bound throughput is supported as data source.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # At least two data columns are required, otherwise there is nothing
    # to compare.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four parallel header rows; per-build values are inserted at index 1
    # (right after the label), so builds processed later end up closer to
    # the label.
    header = [
        [u"Version"],
        [u"Date", ],
        [u"Build", ],
        [u"Testbed", ]
    ]
    tbl_dict = dict()
    # idx counts non-empty builds actually used as data columns.
    idx = 0
    # Optional mapping from testbed IP (as found in build metadata) to a
    # human-readable testbed name.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # NOTE: this breaks only the inner (builds) loop; the outer
            # jobs loop continues, but idx stops further columns being
            # added.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Data columns are keyed by negative index: -1 is the
                # first processed build, -2 the second, etc.
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Missing or malformed throughput entry: the cell is
                    # simply left unset and rendered as "-" later.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute relative changes for each configured comparison.
    # "reference" / "compare" are the negative column keys used above.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # Extend the header rows with a "DiffN" column showing which
        # versions are compared ("<ref> vs <cmp>").
        header[0].append(f"Diff{idx + 1}")
        header[1].append(header[0][idx_ref - idx - 1])
        header[2].append(u"vs")
        header[3].append(header[0][idx_cmp - idx - 1])
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # Keep column alignment: missing data becomes NaN, which
                # is rendered as "-" in the CSV output.
                cmp_dict[tst_name].append(float('nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Assemble rows: test name, data columns (scaled by 1e6 — presumably
    # pps to Mpps, TODO confirm), then the comparison columns.
    # NOTE(review): assumes at least one valid comparison is configured;
    # with an empty "comparisons" list, cmp_dict[tst_name] below would
    # raise KeyError — confirm the specification always provides one.
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # insert(1, ...) mirrors the header ordering: items
                # inserted later land closer to the test name.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        tbl_lst.append(itm_lst)

    # Sort alphabetically first, then by the last comparison column
    # (descending); the second sort is stable so ties stay alphabetical.
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv table:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Render missing values (None / nan / null) uniformly as "-".
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")

    # Reorganize header in txt table
    # (move the third line of the pretty table after the fifth one; a
    # short file is left untouched).
    txt_table = list()
    with open(txt_file, u"rt", encoding='utf-8') as file_handler:
        for line in file_handler:
            txt_table.append(line)
    try:
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Collapse the four header rows into one multi-line (<br>) HTML header
    # cell per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )