Report: Fix typo in specification.
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
30 import pandas as pd
31
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
34
35 from pal_utils import mean, stdev, classify_anomalies, \
36     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
37
38
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
40
41
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to the generator function
    named by its ``algorithm`` field. Unknown algorithms are logged as
    errors and skipped so one bad table does not abort the whole run.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Maps the "algorithm" name from the specification to the function
    # implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                # The weekly comparison additionally needs the testbed map
                # from the environment section of the specification.
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        # An algorithm missing from the dict raises KeyError on the lookup
        # (not NameError), so both must be caught for the message below to
        # ever be emitted for an unknown algorithm.
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
76
77
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one rst file per suite, each embedding raw HTML tables with the
    operational (show-runtime) data of the suite's tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields needed for the operational-data tables are kept.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests, as requested in the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites are needed to group the generated tables into per-suite files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: header rows, spacer rows, alternating body rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test - emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # White-on-white dot keeps the spacer row from collapsing.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers of the per-thread runtime statistics.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, one sub-section per thread.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the main thread, others are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, numeric columns right-aligned.
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    # Alternate row background colors for readability.
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer row with an invisible (white) dot.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to each suite and write
    # them into one rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
280
281
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the columns configured in the
    specification, wrapping selected fields in rst substitution markers
    (|prein|, |preout|, |br|) for later rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests, as requested in the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        # Double any embedded quotes so the CSV output stays valid.
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column["data"] is e.g. u"data name"; the second word
                    # selects the key in the per-test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names into two lines at a dash
                        # roughly in the middle.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        # Drop the leading line break and the trailing
                        # 5 characters (a closing " |br| "-style marker).
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows; a row may be cut short by the
            # "continue" on "Test Failed" above.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
376
377
378 def _tpc_modify_test_name(test_name, ignore_nic=False):
379     """Modify a test name by replacing its parts.
380
381     :param test_name: Test name to be modified.
382     :param ignore_nic: If True, NIC is removed from TC name.
383     :type test_name: str
384     :type ignore_nic: bool
385     :returns: Modified test name.
386     :rtype: str
387     """
388     test_name_mod = test_name.\
389         replace(u"-ndrpdrdisc", u""). \
390         replace(u"-ndrpdr", u"").\
391         replace(u"-pdrdisc", u""). \
392         replace(u"-ndrdisc", u"").\
393         replace(u"-pdr", u""). \
394         replace(u"-ndr", u""). \
395         replace(u"1t1c", u"1c").\
396         replace(u"2t1c", u"1c"). \
397         replace(u"2t2c", u"2c").\
398         replace(u"4t2c", u"2c"). \
399         replace(u"4t4c", u"4c").\
400         replace(u"8t4c", u"4c")
401
402     if ignore_nic:
403         return re.sub(REGEX_NIC, u"", test_name_mod)
404     return test_name_mod
405
406
407 def _tpc_modify_displayed_test_name(test_name):
408     """Modify a test name which is displayed in a table by replacing its parts.
409
410     :param test_name: Test name to be modified.
411     :type test_name: str
412     :returns: Modified test name.
413     :rtype: str
414     """
415     return test_name.\
416         replace(u"1t1c", u"1c").\
417         replace(u"2t1c", u"1c"). \
418         replace(u"2t2c", u"2c").\
419         replace(u"4t2c", u"2c"). \
420         replace(u"4t4c", u"4c").\
421         replace(u"8t4c", u"4c")
422
423
424 def _tpc_insert_data(target, src, include_tests):
425     """Insert src data to the target structure.
426
427     :param target: Target structure where the data is placed.
428     :param src: Source data to be placed into the target stucture.
429     :param include_tests: Which results will be included (MRR, NDR, PDR).
430     :type target: list
431     :type src: dict
432     :type include_tests: str
433     """
434     try:
435         if include_tests == u"MRR":
436             target[u"mean"] = src[u"result"][u"receive-rate"]
437             target[u"stdev"] = src[u"result"][u"receive-stdev"]
438         elif include_tests == u"PDR":
439             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
440         elif include_tests == u"NDR":
441             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
442     except (KeyError, TypeError):
443         pass
444
445
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Index of the "Test Case" column; used as the secondary sort key and
    # to select the alignment/width preset matching the column count.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    # Presets for 2-, 3- and 4-column tables, indexed by idx.
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted DataFrame per (column, direction); each becomes a
        # separate trace whose visibility is switched by the dropdown below.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # Add one (initially hidden-by-menu) table trace per sorted view.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Dropdown menu: one button per (column, direction), showing
        # exactly the corresponding trace.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Default view: the last menu item.
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Unsorted: a single table trace with the data as given.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated html in an rst file with an iframe so it can be
    # included in the report.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )
        if legend:
            # Leading newline is dropped; remaining newlines become |br|.
            rst_file.write(legend[1:].replace(u"\n", u" |br| "))
        if footnote:
            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
618
619
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares SOAK test results (compare set) against NDR/PDR/MRR results
    (reference set) and writes csv, pretty-txt and html outputs.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Fixed typo: the opening parenthesis was missing, producing
            # "Stdev<title>)" instead of "Stdev(<title>)" in the header.
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
            u":END"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Display name: NIC prefix + test name without the
                        # trailing test-type part.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # Best effort: skip runs without a usable result.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests which have a SOAK counterpart are relevant.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build table rows; a row is emitted only when both reference and
    # compare data are present.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                # MRR results already carry (mean, stdev) per run.
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
        txt_file.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
799
800
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    The table lists, per test case, the current trend [Mpps], the
    short- and long-term relative change of the trend and the number of
    regressions / progressions detected within the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Collect the receive rates per test, keyed by build; OrderedDict keeps
    # the build (chronological) order the trend windows rely on.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # At least two samples are needed to classify a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # No valid samples in the long-term part of the window.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete trend data. The original code also
            # tested `isnan(rel_change_last) and isnan(rel_change_long)`
            # first, but that condition is fully subsumed by this one, so
            # the redundant check was removed.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by number of regressions, then progressions (descending),
    # then short-term change; the per-bucket extraction keeps it stable.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
920
921
922 def _generate_url(testbed, test_name):
923     """Generate URL to a trending plot from the name of the test case.
924
925     :param testbed: The testbed used for testing.
926     :param test_name: The name of the test case.
927     :type testbed: str
928     :type test_name: str
929     :returns: The URL to the plot with the trending data for the given test
930         case.
931     :rtype str
932     """
933
934     if u"x520" in test_name:
935         nic = u"x520"
936     elif u"x710" in test_name:
937         nic = u"x710"
938     elif u"xl710" in test_name:
939         nic = u"xl710"
940     elif u"xxv710" in test_name:
941         nic = u"xxv710"
942     elif u"vic1227" in test_name:
943         nic = u"vic1227"
944     elif u"vic1385" in test_name:
945         nic = u"vic1385"
946     elif u"x553" in test_name:
947         nic = u"x553"
948     else:
949         nic = u""
950
951     if u"64b" in test_name:
952         frame_size = u"64b"
953     elif u"78b" in test_name:
954         frame_size = u"78b"
955     elif u"imix" in test_name:
956         frame_size = u"imix"
957     elif u"9000b" in test_name:
958         frame_size = u"9000b"
959     elif u"1518b" in test_name:
960         frame_size = u"1518b"
961     elif u"114b" in test_name:
962         frame_size = u"114b"
963     else:
964         frame_size = u""
965
966     if u"1t1c" in test_name or \
967         (u"-1c-" in test_name and
968          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
969         cores = u"1t1c"
970     elif u"2t2c" in test_name or \
971          (u"-2c-" in test_name and
972           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
973         cores = u"2t2c"
974     elif u"4t4c" in test_name or \
975          (u"-4c-" in test_name and
976           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
977         cores = u"4t4c"
978     elif u"2t1c" in test_name or \
979          (u"-1c-" in test_name and
980           testbed in (u"2n-skx", u"3n-skx")):
981         cores = u"2t1c"
982     elif u"4t2c" in test_name:
983         cores = u"4t2c"
984     elif u"8t4c" in test_name:
985         cores = u"8t4c"
986     else:
987         cores = u""
988
989     if u"testpmd" in test_name:
990         driver = u"testpmd"
991     elif u"l3fwd" in test_name:
992         driver = u"l3fwd"
993     elif u"avf" in test_name:
994         driver = u"avf"
995     elif u"dnv" in testbed or u"tsh" in testbed:
996         driver = u"ixgbe"
997     else:
998         driver = u"dpdk"
999
1000     if u"acl" in test_name or \
1001             u"macip" in test_name or \
1002             u"nat" in test_name or \
1003             u"policer" in test_name or \
1004             u"cop" in test_name:
1005         bsf = u"features"
1006     elif u"scale" in test_name:
1007         bsf = u"scale"
1008     elif u"base" in test_name:
1009         bsf = u"base"
1010     else:
1011         bsf = u"base"
1012
1013     if u"114b" in test_name and u"vhost" in test_name:
1014         domain = u"vts"
1015     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1016         domain = u"dpdk"
1017     elif u"memif" in test_name:
1018         domain = u"container_memif"
1019     elif u"srv6" in test_name:
1020         domain = u"srv6"
1021     elif u"vhost" in test_name:
1022         domain = u"vhost"
1023         if u"vppl2xc" in test_name:
1024             driver += u"-vpp"
1025         else:
1026             driver += u"-testpmd"
1027         if u"lbvpplacp" in test_name:
1028             bsf += u"-link-bonding"
1029     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1030         domain = u"nf_service_density_vnfc"
1031     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1032         domain = u"nf_service_density_cnfc"
1033     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1034         domain = u"nf_service_density_cnfp"
1035     elif u"ipsec" in test_name:
1036         domain = u"ipsec"
1037         if u"sw" in test_name:
1038             bsf += u"-sw"
1039         elif u"hw" in test_name:
1040             bsf += u"-hw"
1041     elif u"ethip4vxlan" in test_name:
1042         domain = u"ip4_tunnels"
1043     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1044         domain = u"ip4"
1045     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1046         domain = u"ip6"
1047     elif u"l2xcbase" in test_name or \
1048             u"l2xcscale" in test_name or \
1049             u"l2bdbasemaclrn" in test_name or \
1050             u"l2bdscale" in test_name or \
1051             u"l2patch" in test_name:
1052         domain = u"l2"
1053     else:
1054         domain = u""
1055
1056     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1057     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1058
1059     return file_name + anchor_name
1060
1061
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data  # This algorithm reads its input from a csv file instead.

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated html table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = caption

    # Background colour pairs (even row, odd row) per row category:
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }
    for row_nr, row in enumerate(rows[1:]):
        # Columns 4/5 hold the regression/progression counters.
        if int(row[4]):
            category = u"regression"
        elif int(row[5]):
            category = u"progression"
        else:
            category = u"normal"
        data_row = ET.SubElement(
            dashboard, u"tr",
            attrib=dict(bgcolor=colors[category][row_nr % 2])
        )

        # Columns:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # First column: link the test name to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1162
1163
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                else:
                    failed += 1
                    # Failed tests are listed with the NIC name prefixed.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if groups:
                        failed_tests.append(
                            f"{groups.group(0)}-{tst_data[u'name']}"
                        )
            # Per build: build id, version, pass/fail counters, then the
            # names of the failed tests, one item per output line.
            tbl_list.extend((build, version, str(passed), str(failed)))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(test + u'\n' for test in tbl_list)
1224
1225
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    The table lists, for each test which failed within the configured
    time window, the number of failures and the time, VPP build and CSIT
    build of the last failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Displayed name is the NIC (parsed from the parent
                    # suite name) prefixed to the test name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Per-build record: status, timestamp, VPP
                        # version and CSIT build id.
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count failures; builds are iterated in insertion order, so the
        # "last" values end up being those of the latest failing build.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Primary order: number of failures (descending); secondary order:
    # last failure time (descending) - preserved by the stable per-bucket
    # extraction below.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1333
1334
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data  # The table is built from a csv file, not from input_data.

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_rows = list(
                csv.reader(csv_file, delimiter=u',', quotechar=u'"')
            )
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated html table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, caption in enumerate(csv_rows[0]):
        th_cell = ET.SubElement(
            header_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        th_cell.text = caption

    # Data rows with alternating background colours:
    stripe = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, csv_row in enumerate(csv_rows[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=stripe[row_nr % 2])
        )

        # Columns:
        for col_nr, value in enumerate(csv_row):
            td_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # First column: link the test name to its trending plot.
                anchor = ET.SubElement(
                    td_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                anchor.text = value
            else:
                td_cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1416
1417
1418 def table_comparison(table, input_data):
1419     """Generate the table(s) with algorithm: table_comparison
1420     specified in the specification file.
1421
1422     :param table: Table to generate.
1423     :param input_data: Data to process.
1424     :type table: pandas.Series
1425     :type input_data: InputData
1426     """
1427     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1428
1429     # Transform the data
1430     logging.info(
1431         f"    Creating the data set for the {table.get(u'type', u'')} "
1432         f"{table.get(u'title', u'')}."
1433     )
1434
1435     columns = table.get(u"columns", None)
1436     if not columns:
1437         logging.error(
1438             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1439         )
1440         return
1441
1442     cols = list()
1443     for idx, col in enumerate(columns):
1444         if col.get(u"data-set", None) is None:
1445             logging.warning(f"No data for column {col.get(u'title', u'')}")
1446             continue
1447         tag = col.get(u"tag", None)
1448         data = input_data.filter_data(
1449             table,
1450             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1451             data=col[u"data-set"],
1452             continue_on_error=True
1453         )
1454         col_data = {
1455             u"title": col.get(u"title", f"Column{idx}"),
1456             u"data": dict()
1457         }
1458         for builds in data.values:
1459             for build in builds:
1460                 for tst_name, tst_data in build.items():
1461                     if tag and tag not in tst_data[u"tags"]:
1462                         continue
1463                     tst_name_mod = \
1464                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1465                         replace(u"2n1l-", u"")
1466                     if col_data[u"data"].get(tst_name_mod, None) is None:
1467                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1468                         if u"across testbeds" in table[u"title"].lower() or \
1469                                 u"across topologies" in table[u"title"].lower():
1470                             name = _tpc_modify_displayed_test_name(name)
1471                         col_data[u"data"][tst_name_mod] = {
1472                             u"name": name,
1473                             u"replace": True,
1474                             u"data": list(),
1475                             u"mean": None,
1476                             u"stdev": None
1477                         }
1478                     _tpc_insert_data(
1479                         target=col_data[u"data"][tst_name_mod],
1480                         src=tst_data,
1481                         include_tests=table[u"include-tests"]
1482                     )
1483
1484         replacement = col.get(u"data-replacement", None)
1485         if replacement:
1486             rpl_data = input_data.filter_data(
1487                 table,
1488                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1489                 data=replacement,
1490                 continue_on_error=True
1491             )
1492             for builds in rpl_data.values:
1493                 for build in builds:
1494                     for tst_name, tst_data in build.items():
1495                         if tag and tag not in tst_data[u"tags"]:
1496                             continue
1497                         tst_name_mod = \
1498                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1499                             replace(u"2n1l-", u"")
1500                         if col_data[u"data"].get(tst_name_mod, None) is None:
1501                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1502                             if u"across testbeds" in table[u"title"].lower() \
1503                                     or u"across topologies" in \
1504                                     table[u"title"].lower():
1505                                 name = _tpc_modify_displayed_test_name(name)
1506                             col_data[u"data"][tst_name_mod] = {
1507                                 u"name": name,
1508                                 u"replace": False,
1509                                 u"data": list(),
1510                                 u"mean": None,
1511                                 u"stdev": None
1512                             }
1513                         if col_data[u"data"][tst_name_mod][u"replace"]:
1514                             col_data[u"data"][tst_name_mod][u"replace"] = False
1515                             col_data[u"data"][tst_name_mod][u"data"] = list()
1516                         _tpc_insert_data(
1517                             target=col_data[u"data"][tst_name_mod],
1518                             src=tst_data,
1519                             include_tests=table[u"include-tests"]
1520                         )
1521
1522         if table[u"include-tests"] in (u"NDR", u"PDR"):
1523             for tst_name, tst_data in col_data[u"data"].items():
1524                 if tst_data[u"data"]:
1525                     tst_data[u"mean"] = mean(tst_data[u"data"])
1526                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1527
1528         cols.append(col_data)
1529
1530     tbl_dict = dict()
1531     for col in cols:
1532         for tst_name, tst_data in col[u"data"].items():
1533             if tbl_dict.get(tst_name, None) is None:
1534                 tbl_dict[tst_name] = {
1535                     "name": tst_data[u"name"]
1536                 }
1537             tbl_dict[tst_name][col[u"title"]] = {
1538                 u"mean": tst_data[u"mean"],
1539                 u"stdev": tst_data[u"stdev"]
1540             }
1541
1542     if not tbl_dict:
1543         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1544         return
1545
1546     tbl_lst = list()
1547     for tst_data in tbl_dict.values():
1548         row = [tst_data[u"name"], ]
1549         for col in cols:
1550             row.append(tst_data.get(col[u"title"], None))
1551         tbl_lst.append(row)
1552
1553     comparisons = table.get(u"comparisons", None)
1554     if comparisons and isinstance(comparisons, list):
1555         for idx, comp in enumerate(comparisons):
1556             try:
1557                 col_ref = int(comp[u"reference"])
1558                 col_cmp = int(comp[u"compare"])
1559             except KeyError:
1560                 logging.warning(u"Comparison: No references defined! Skipping.")
1561                 comparisons.pop(idx)
1562                 continue
1563             if not (0 < col_ref <= len(cols) and
1564                     0 < col_cmp <= len(cols)) or \
1565                     col_ref == col_cmp:
1566                 logging.warning(f"Wrong values of reference={col_ref} "
1567                                 f"and/or compare={col_cmp}. Skipping.")
1568                 comparisons.pop(idx)
1569                 continue
1570
1571     tbl_cmp_lst = list()
1572     if comparisons:
1573         for row in tbl_lst:
1574             new_row = deepcopy(row)
1575             add_to_tbl = False
1576             for comp in comparisons:
1577                 ref_itm = row[int(comp[u"reference"])]
1578                 if ref_itm is None and \
1579                         comp.get(u"reference-alt", None) is not None:
1580                     ref_itm = row[int(comp[u"reference-alt"])]
1581                 cmp_itm = row[int(comp[u"compare"])]
1582                 if ref_itm is not None and cmp_itm is not None and \
1583                         ref_itm[u"mean"] is not None and \
1584                         cmp_itm[u"mean"] is not None and \
1585                         ref_itm[u"stdev"] is not None and \
1586                         cmp_itm[u"stdev"] is not None:
1587                     delta, d_stdev = relative_change_stdev(
1588                         ref_itm[u"mean"], cmp_itm[u"mean"],
1589                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
1590                     )
1591                     new_row.append(
1592                         {
1593                             u"mean": delta * 1e6,
1594                             u"stdev": d_stdev * 1e6
1595                         }
1596                     )
1597                     add_to_tbl = True
1598                 else:
1599                     new_row.append(None)
1600             if add_to_tbl:
1601                 tbl_cmp_lst.append(new_row)
1602
1603     tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1604     tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1605
1606     rcas = list()
1607     rca_in = table.get(u"rca", None)
1608     if rca_in and isinstance(rca_in, list):
1609         for idx, itm in enumerate(rca_in):
1610             try:
1611                 with open(itm.get(u"data", u""), u"r") as rca_file:
1612                     rcas.append(
1613                         {
1614                             u"title": itm.get(u"title", f"RCA{idx}"),
1615                             u"data": load(rca_file, Loader=FullLoader)
1616                         }
1617                     )
1618             except (YAMLError, IOError) as err:
1619                 logging.warning(
1620                     f"The RCA file {itm.get(u'data', u'')} does not exist or "
1621                     f"it is corrupted!"
1622                 )
1623                 logging.debug(repr(err))
1624
1625     tbl_for_csv = list()
1626     for line in tbl_cmp_lst:
1627         row = [line[0], ]
1628         for idx, itm in enumerate(line[1:]):
1629             if itm is None:
1630                 row.append(u"NT")
1631                 row.append(u"NT")
1632             else:
1633                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1634                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1635         for rca in rcas:
1636             rca_nr = rca[u"data"].get(row[0], u"-")
1637             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1638         tbl_for_csv.append(row)
1639
1640     header_csv = [u"Test Case", ]
1641     for col in cols:
1642         header_csv.append(f"Avg({col[u'title']})")
1643         header_csv.append(f"Stdev({col[u'title']})")
1644     for comp in comparisons:
1645         header_csv.append(
1646             f"Avg({comp.get(u'title', u'')})"
1647         )
1648         header_csv.append(
1649             f"Stdev({comp.get(u'title', u'')})"
1650         )
1651     header_csv.extend([rca[u"title"] for rca in rcas])
1652
1653     legend_lst = table.get(u"legend", None)
1654     if legend_lst is None:
1655         legend = u""
1656     else:
1657         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1658
1659     footnote = u""
1660     for rca in rcas:
1661         footnote += f"\n{rca[u'title']}:\n"
1662         footnote += rca[u"data"].get(u"footnote", u"")
1663
1664     csv_file = f"{table[u'output-file']}-csv.csv"
1665     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
1666         file_handler.write(
1667             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1668         )
1669         for test in tbl_for_csv:
1670             file_handler.write(
1671                 u",".join([f'"{item}"' for item in test]) + u"\n"
1672             )
1673         if legend_lst:
1674             for item in legend_lst:
1675                 file_handler.write(f'"{item}"\n')
1676         if footnote:
1677             for itm in footnote.split(u"\n"):
1678                 file_handler.write(f'"{itm}"\n')
1679
1680     tbl_tmp = list()
1681     max_lens = [0, ] * len(tbl_cmp_lst[0])
1682     for line in tbl_cmp_lst:
1683         row = [line[0], ]
1684         for idx, itm in enumerate(line[1:]):
1685             if itm is None:
1686                 new_itm = u"NT"
1687             else:
1688                 if idx < len(cols):
1689                     new_itm = (
1690                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
1691                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1692                         replace(u"nan", u"NaN")
1693                     )
1694                 else:
1695                     new_itm = (
1696                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1697                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1698                         replace(u"nan", u"NaN")
1699                     )
1700             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1701                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1702             row.append(new_itm)
1703
1704         tbl_tmp.append(row)
1705
1706     tbl_final = list()
1707     for line in tbl_tmp:
1708         row = [line[0], ]
1709         for idx, itm in enumerate(line[1:]):
1710             if itm in (u"NT", u"NaN"):
1711                 row.append(itm)
1712                 continue
1713             itm_lst = itm.rsplit(u"\u00B1", 1)
1714             itm_lst[-1] = \
1715                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1716             row.append(u"\u00B1".join(itm_lst))
1717         for rca in rcas:
1718             rca_nr = rca[u"data"].get(row[0], u"-")
1719             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1720
1721         tbl_final.append(row)
1722
1723     header = [u"Test Case", ]
1724     header.extend([col[u"title"] for col in cols])
1725     header.extend([comp.get(u"title", u"") for comp in comparisons])
1726     header.extend([rca[u"title"] for rca in rcas])
1727
1728     # Generate csv tables:
1729     csv_file = f"{table[u'output-file']}.csv"
1730     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
1731         file_handler.write(u";".join(header) + u"\n")
1732         for test in tbl_final:
1733             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1734
1735     # Generate txt table:
1736     txt_file_name = f"{table[u'output-file']}.txt"
1737     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1738
1739     with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
1740         txt_file.write(legend)
1741         txt_file.write(footnote)
1742         if legend or footnote:
1743             txt_file.write(u"\n:END")
1744
1745     # Generate html table:
1746     _tpc_generate_html_table(
1747         header,
1748         tbl_final,
1749         table[u'output-file'],
1750         legend=legend,
1751         footnote=footnote,
1752         sort_data=False,
1753         title=table.get(u"title", u"")
1754     )
1755
1756
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Collects the NDR or PDR lower throughput of the last ``nr-of-data-columns``
    non-empty builds, computes relative changes between columns selected by the
    ``comparisons`` item of the specification, and writes the result as csv,
    pretty txt and html tables.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR or PDR results are supported; anything else aborts the table.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # At least two data columns are needed to compute a comparison.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build cells are inserted at index 1, so the most
    # recently processed build ends up leftmost.  Diff columns are appended
    # at the end of each row later.
    header = [
        [u"Version"],
        [u"Date", ],
        [u"Build", ],
        [u"Testbed", ]
    ]
    tbl_dict = dict()
    # idx counts the data columns collected so far (capped at nr_cols).
    idx = 0
    # Optional mapping testbed-IP -> human readable testbed name.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Stop collecting once nr_cols builds have been taken.  This
            # breaks only the inner loop; subsequent jobs re-enter the loop
            # but break immediately on the same condition.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Data columns are keyed by negative index: -1 for the first
                # collected build, -2 for the next, etc.  The spec's
                # "comparisons" reference/compare values use the same keys.
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Best effort: a test without the requested throughput
                    # value simply gets no entry for this column.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute relative changes for each configured comparison and extend the
    # header rows with one "DiffN" column per comparison.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        header[0].append(f"Diff{idx + 1}")
        # The "- idx" part compensates for the Diff titles appended to
        # header[0] by previous iterations, so the negative index still
        # addresses the original version cells from the end of the row.
        header[1].append(header[0][idx_ref - idx - 1])
        header[2].append(u"vs")
        header[3].append(header[0][idx_cmp - idx - 1])
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # NaN placeholder; rendered as "-" when the csv is written.
                cmp_dict[tst_name].append(float('nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build the data rows: test name, data columns (Mpps, rounded), then the
    # comparison columns.  insert(1, ...) mirrors the header construction so
    # the data columns line up with the header cells.
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        tbl_lst.append(itm_lst)

    # Two stable sorts: alphabetically by test name, then by the last
    # comparison value, descending.  NOTE(review): NaN values in rel[-1]
    # compare False to everything, so their position is not well defined.
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv table:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Render missing values (None / nan / null) as "-".
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file, u"rt", encoding='utf-8') as file_handler:
        for line in file_handler:
            txt_table.append(line)
    # Move the third line of the pretty txt table below the multi-row header
    # (presumably a separator produced by convert_csv_to_pretty_txt — the
    # exact layout is defined by that helper).  A too-short table is left
    # unchanged.
    try:
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Transpose the four header rows into one <br>-joined cell per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )