JSON: Implement latest changes
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
30 import pandas as pd
31
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
34
35 from pal_utils import mean, stdev, classify_anomalies, \
36     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
37
38
# Matches the NIC part of a test/suite name, e.g. u"10ge2p1x710".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
40
41
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table mapping the algorithm name from the specification to
    # the function implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            # An unknown algorithm raises KeyError on the dict lookup, not
            # NameError, so both are caught here; one bad table must not
            # abort the generation of the remaining ones.
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
76
77
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields needed to render the operational data are requested.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colours: headers, spacer rows and alternating body rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            # No operational data for this test; emit a single u"No Data"
            # row and finish.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # Invisible (white, oversized) dot used as a vertical spacer.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Number of threads is derived from the per-node counter lists.
            try:
                threads_nr = len(dut_data[u"runtime"][0][u"clocks"])
            except (IndexError, KeyError):
                tcol.text = u"No Data"
                continue

            # Group the runtime counters per thread; thread 0 is u"main",
            # the rest are workers.
            threads = OrderedDict({idx: list() for idx in range(threads_nr)})
            for item in dut_data[u"runtime"]:
                for idx in range(threads_nr):
                    # Clocks per unit of work: prefer per-vector, fall back
                    # to per-call, then per-suspend; 0.0 when idle.
                    if item[u"vectors"][idx] > 0:
                        clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
                    elif item[u"calls"][idx] > 0:
                        clocks = item[u"clocks"][idx] / item[u"calls"][idx]
                    elif item[u"suspends"][idx] > 0:
                        clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
                    else:
                        clocks = 0.0

                    # Average vector size = vectors per call.
                    if item[u"calls"][idx] > 0:
                        vectors_call = item[u"vectors"][idx] / item[u"calls"][
                            idx]
                    else:
                        vectors_call = 0.0

                    # Only nodes with some activity are shown.
                    if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
                        int(item[u"suspends"][idx]):
                        threads[idx].append([
                            item[u"name"],
                            item[u"calls"][idx],
                            item[u"vectors"][idx],
                            item[u"suspends"][idx],
                            clocks,
                            vectors_call
                        ])

            # DUT sub-header with host and socket.
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: thread title, column headers,
            # data rows and a trailing spacer row.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, the rest right-aligned.
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats (clocks, avg vector size) are shown with
                        # two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Final spacer row with an invisible dot.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One rst file per suite, concatenating the tables of all tests that
    # belong to it (matched by substring of the suite name - presumably
    # intended, as parents nest; TODO confirm).
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
315
316
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; quotes inside titles are escaped by
    # doubling them (CSV convention).
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data: one CSV table per suite, one row per passed test.
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is e.g. u"data msg"; the second word
                    # selects which field of the test data to show.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are split roughly in half on u"-"
                        # and rejoined with an rst line break.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # The last 5 characters are cut off - presumably the
                        # remnant of the u" |br| " produced by the trailing
                        # newline; TODO confirm.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows; a row shortened by the u"Test Failed"
            # 'continue' above is dropped here.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
414
415
416 def _tpc_modify_test_name(test_name, ignore_nic=False):
417     """Modify a test name by replacing its parts.
418
419     :param test_name: Test name to be modified.
420     :param ignore_nic: If True, NIC is removed from TC name.
421     :type test_name: str
422     :type ignore_nic: bool
423     :returns: Modified test name.
424     :rtype: str
425     """
426     test_name_mod = test_name.\
427         replace(u"-ndrpdr", u"").\
428         replace(u"1t1c", u"1c").\
429         replace(u"2t1c", u"1c"). \
430         replace(u"2t2c", u"2c").\
431         replace(u"4t2c", u"2c"). \
432         replace(u"4t4c", u"4c").\
433         replace(u"8t4c", u"4c")
434
435     if ignore_nic:
436         return re.sub(REGEX_NIC, u"", test_name_mod)
437     return test_name_mod
438
439
440 def _tpc_modify_displayed_test_name(test_name):
441     """Modify a test name which is displayed in a table by replacing its parts.
442
443     :param test_name: Test name to be modified.
444     :type test_name: str
445     :returns: Modified test name.
446     :rtype: str
447     """
448     return test_name.\
449         replace(u"1t1c", u"1c").\
450         replace(u"2t1c", u"1c"). \
451         replace(u"2t2c", u"2c").\
452         replace(u"4t2c", u"2c"). \
453         replace(u"4t4c", u"4c").\
454         replace(u"8t4c", u"4c")
455
456
457 def _tpc_insert_data(target, src, include_tests):
458     """Insert src data to the target structure.
459
460     :param target: Target structure where the data is placed.
461     :param src: Source data to be placed into the target structure.
462     :param include_tests: Which results will be included (MRR, NDR, PDR).
463     :type target: list
464     :type src: dict
465     :type include_tests: str
466     """
467     try:
468         if include_tests == u"MRR":
469             target[u"mean"] = src[u"result"][u"receive-rate"]
470             target[u"stdev"] = src[u"result"][u"receive-stdev"]
471         elif include_tests == u"PDR":
472             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
473         elif include_tests == u"NDR":
474             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
475     except (KeyError, TypeError):
476         pass
477
478
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Index of the u"Test Case" column; also used below to pick alignment /
    # width presets (tuples indexed 0-2, i.e. for 2- to 4-column tables).
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One sorted copy of the data per column, ascending, followed by one
        # per column descending; ties are broken by the u"Test Case" column.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colours.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One table trace per sort order; the dropdown menu built below
        # makes exactly one of them visible at a time.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        # NOTE: idx is re-used here; the column-index value above is no
        # longer needed in this branch.
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # The last menu item (descending sort by the last
                    # column) is pre-selected.
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Sorting disabled: a single table trace with the data as given.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Generate the wrapping rst file which embeds the html table in an
    # iframe and adds title, legend and footnote.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # The first line of the legend / footnote becomes a paragraph, the
        # remaining lines a bullet list.
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
665
666
667 def table_soak_vs_ndr(table, input_data):
668     """Generate the table(s) with algorithm: table_soak_vs_ndr
669     specified in the specification file.
670
671     :param table: Table to generate.
672     :param input_data: Data to process.
673     :type table: pandas.Series
674     :type input_data: InputData
675     """
676
677     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
678
679     # Transform the data
680     logging.info(
681         f"    Creating the data set for the {table.get(u'type', u'')} "
682         f"{table.get(u'title', u'')}."
683     )
684     data = input_data.filter_data(table, continue_on_error=True)
685
686     # Prepare the header of the table
687     try:
688         header = [
689             u"Test Case",
690             f"Avg({table[u'reference'][u'title']})",
691             f"Stdev({table[u'reference'][u'title']})",
692             f"Avg({table[u'compare'][u'title']})",
693             f"Stdev{table[u'compare'][u'title']})",
694             u"Diff",
695             u"Stdev(Diff)"
696         ]
697         header_str = u";".join(header) + u"\n"
698         legend = (
699             u"\nLegend:\n"
700             f"Avg({table[u'reference'][u'title']}): "
701             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
702             f"from a series of runs of the listed tests.\n"
703             f"Stdev({table[u'reference'][u'title']}): "
704             f"Standard deviation value of {table[u'reference'][u'title']} "
705             f"[Mpps] computed from a series of runs of the listed tests.\n"
706             f"Avg({table[u'compare'][u'title']}): "
707             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
708             f"a series of runs of the listed tests.\n"
709             f"Stdev({table[u'compare'][u'title']}): "
710             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
711             f"computed from a series of runs of the listed tests.\n"
712             f"Diff({table[u'reference'][u'title']},"
713             f"{table[u'compare'][u'title']}): "
714             f"Percentage change calculated for mean values.\n"
715             u"Stdev(Diff): "
716             u"Standard deviation of percentage change calculated for mean "
717             u"values."
718         )
719     except (AttributeError, KeyError) as err:
720         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
721         return
722
723     # Create a list of available SOAK test results:
724     tbl_dict = dict()
725     for job, builds in table[u"compare"][u"data"].items():
726         for build in builds:
727             for tst_name, tst_data in data[job][str(build)].items():
728                 if tst_data[u"type"] == u"SOAK":
729                     tst_name_mod = tst_name.replace(u"-soak", u"")
730                     if tbl_dict.get(tst_name_mod, None) is None:
731                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
732                         nic = groups.group(0) if groups else u""
733                         name = (
734                             f"{nic}-"
735                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
736                         )
737                         tbl_dict[tst_name_mod] = {
738                             u"name": name,
739                             u"ref-data": list(),
740                             u"cmp-data": list()
741                         }
742                     try:
743                         tbl_dict[tst_name_mod][u"cmp-data"].append(
744                             tst_data[u"throughput"][u"LOWER"])
745                     except (KeyError, TypeError):
746                         pass
747     tests_lst = tbl_dict.keys()
748
749     # Add corresponding NDR test results:
750     for job, builds in table[u"reference"][u"data"].items():
751         for build in builds:
752             for tst_name, tst_data in data[job][str(build)].items():
753                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
754                     replace(u"-mrr", u"")
755                 if tst_name_mod not in tests_lst:
756                     continue
757                 try:
758                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
759                         continue
760                     if table[u"include-tests"] == u"MRR":
761                         result = (tst_data[u"result"][u"receive-rate"],
762                                   tst_data[u"result"][u"receive-stdev"])
763                     elif table[u"include-tests"] == u"PDR":
764                         result = \
765                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
766                     elif table[u"include-tests"] == u"NDR":
767                         result = \
768                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
769                     else:
770                         result = None
771                     if result is not None:
772                         tbl_dict[tst_name_mod][u"ref-data"].append(
773                             result)
774                 except (KeyError, TypeError):
775                     continue
776
777     tbl_lst = list()
778     for tst_name in tbl_dict:
779         item = [tbl_dict[tst_name][u"name"], ]
780         data_r = tbl_dict[tst_name][u"ref-data"]
781         if data_r:
782             if table[u"include-tests"] == u"MRR":
783                 data_r_mean = data_r[0][0]
784                 data_r_stdev = data_r[0][1]
785             else:
786                 data_r_mean = mean(data_r)
787                 data_r_stdev = stdev(data_r)
788             item.append(round(data_r_mean / 1e6, 1))
789             item.append(round(data_r_stdev / 1e6, 1))
790         else:
791             data_r_mean = None
792             data_r_stdev = None
793             item.extend([None, None])
794         data_c = tbl_dict[tst_name][u"cmp-data"]
795         if data_c:
796             if table[u"include-tests"] == u"MRR":
797                 data_c_mean = data_c[0][0]
798                 data_c_stdev = data_c[0][1]
799             else:
800                 data_c_mean = mean(data_c)
801                 data_c_stdev = stdev(data_c)
802             item.append(round(data_c_mean / 1e6, 1))
803             item.append(round(data_c_stdev / 1e6, 1))
804         else:
805             data_c_mean = None
806             data_c_stdev = None
807             item.extend([None, None])
808         if data_r_mean is not None and data_c_mean is not None:
809             delta, d_stdev = relative_change_stdev(
810                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
811             try:
812                 item.append(round(delta))
813             except ValueError:
814                 item.append(delta)
815             try:
816                 item.append(round(d_stdev))
817             except ValueError:
818                 item.append(d_stdev)
819             tbl_lst.append(item)
820
821     # Sort the table according to the relative change
822     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
823
824     # Generate csv tables:
825     csv_file_name = f"{table[u'output-file']}.csv"
826     with open(csv_file_name, u"wt") as file_handler:
827         file_handler.write(header_str)
828         for test in tbl_lst:
829             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
830
831     convert_csv_to_pretty_txt(
832         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
833     )
834     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
835         file_handler.write(legend)
836
837     # Generate html table:
838     _tpc_generate_html_table(
839         header,
840         tbl_lst,
841         table[u'output-file'],
842         legend=legend,
843         title=table.get(u"title", u"")
844     )
845
846
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    The table lists, per test case, the last trend value, the short-term
    and long-term relative changes of the trend, and the numbers of
    regressions and progressions detected within the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table; one result per test case per build:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # The NIC cannot be derived from the parent suite.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # At least two samples are needed to compute a trend.
            continue

        classification_lst, avgs, _ = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # No valid samples in the long-term part of the window.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip the test if any of the displayed values is not available.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Pre-sort by name, long-term and short-term change (stable sorts), ...
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # ... then order primarily by the number of regressions (descending)
    # and secondarily by the number of progressions (descending).
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
976
977
978 def _generate_url(testbed, test_name):
979     """Generate URL to a trending plot from the name of the test case.
980
981     :param testbed: The testbed used for testing.
982     :param test_name: The name of the test case.
983     :type testbed: str
984     :type test_name: str
985     :returns: The URL to the plot with the trending data for the given test
986         case.
987     :rtype str
988     """
989
990     if u"x520" in test_name:
991         nic = u"x520"
992     elif u"x710" in test_name:
993         nic = u"x710"
994     elif u"xl710" in test_name:
995         nic = u"xl710"
996     elif u"xxv710" in test_name:
997         nic = u"xxv710"
998     elif u"vic1227" in test_name:
999         nic = u"vic1227"
1000     elif u"vic1385" in test_name:
1001         nic = u"vic1385"
1002     elif u"x553" in test_name:
1003         nic = u"x553"
1004     elif u"cx556" in test_name or u"cx556a" in test_name:
1005         nic = u"cx556a"
1006     else:
1007         nic = u""
1008
1009     if u"64b" in test_name:
1010         frame_size = u"64b"
1011     elif u"78b" in test_name:
1012         frame_size = u"78b"
1013     elif u"imix" in test_name:
1014         frame_size = u"imix"
1015     elif u"9000b" in test_name:
1016         frame_size = u"9000b"
1017     elif u"1518b" in test_name:
1018         frame_size = u"1518b"
1019     elif u"114b" in test_name:
1020         frame_size = u"114b"
1021     else:
1022         frame_size = u""
1023
1024     if u"1t1c" in test_name or \
1025         (u"-1c-" in test_name and
1026          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1027         cores = u"1t1c"
1028     elif u"2t2c" in test_name or \
1029          (u"-2c-" in test_name and
1030           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1031         cores = u"2t2c"
1032     elif u"4t4c" in test_name or \
1033          (u"-4c-" in test_name and
1034           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1035         cores = u"4t4c"
1036     elif u"2t1c" in test_name or \
1037          (u"-1c-" in test_name and
1038           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1039         cores = u"2t1c"
1040     elif u"4t2c" in test_name or \
1041          (u"-2c-" in test_name and
1042           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1043         cores = u"4t2c"
1044     elif u"8t4c" in test_name or \
1045          (u"-4c-" in test_name and
1046           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1047         cores = u"8t4c"
1048     else:
1049         cores = u""
1050
1051     if u"testpmd" in test_name:
1052         driver = u"testpmd"
1053     elif u"l3fwd" in test_name:
1054         driver = u"l3fwd"
1055     elif u"avf" in test_name:
1056         driver = u"avf"
1057     elif u"rdma" in test_name:
1058         driver = u"rdma"
1059     elif u"dnv" in testbed or u"tsh" in testbed:
1060         driver = u"ixgbe"
1061     else:
1062         driver = u"dpdk"
1063
1064     if u"macip-iacl1s" in test_name:
1065         bsf = u"features-macip-iacl1"
1066     elif u"macip-iacl10s" in test_name:
1067         bsf = u"features-macip-iacl10"
1068     elif u"macip-iacl50s" in test_name:
1069         bsf = u"features-macip-iacl50"
1070     elif u"iacl1s" in test_name:
1071         bsf = u"features-iacl1"
1072     elif u"iacl10s" in test_name:
1073         bsf = u"features-iacl10"
1074     elif u"iacl50s" in test_name:
1075         bsf = u"features-iacl50"
1076     elif u"oacl1s" in test_name:
1077         bsf = u"features-oacl1"
1078     elif u"oacl10s" in test_name:
1079         bsf = u"features-oacl10"
1080     elif u"oacl50s" in test_name:
1081         bsf = u"features-oacl50"
1082     elif u"nat44det" in test_name:
1083         bsf = u"nat44det-bidir"
1084     elif u"nat44ed" in test_name and u"udir" in test_name:
1085         bsf = u"nat44ed-udir"
1086     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1087         bsf = u"udp-cps"
1088     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1089         bsf = u"tcp-cps"
1090     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1091         bsf = u"udp-pps"
1092     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1093         bsf = u"tcp-pps"
1094     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1095         bsf = u"udp-tput"
1096     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1097         bsf = u"tcp-tput"
1098     elif u"udpsrcscale" in test_name:
1099         bsf = u"features-udp"
1100     elif u"iacl" in test_name:
1101         bsf = u"features"
1102     elif u"policer" in test_name:
1103         bsf = u"features"
1104     elif u"adl" in test_name:
1105         bsf = u"features"
1106     elif u"cop" in test_name:
1107         bsf = u"features"
1108     elif u"nat" in test_name:
1109         bsf = u"features"
1110     elif u"macip" in test_name:
1111         bsf = u"features"
1112     elif u"scale" in test_name:
1113         bsf = u"scale"
1114     elif u"base" in test_name:
1115         bsf = u"base"
1116     else:
1117         bsf = u"base"
1118
1119     if u"114b" in test_name and u"vhost" in test_name:
1120         domain = u"vts"
1121     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1122         domain = u"nat44"
1123         if u"nat44det" in test_name:
1124             domain += u"-det-bidir"
1125         else:
1126             domain += u"-ed"
1127         if u"udir" in test_name:
1128             domain += u"-unidir"
1129         elif u"-ethip4udp-" in test_name:
1130             domain += u"-udp"
1131         elif u"-ethip4tcp-" in test_name:
1132             domain += u"-tcp"
1133         if u"-cps" in test_name:
1134             domain += u"-cps"
1135         elif u"-pps" in test_name:
1136             domain += u"-pps"
1137         elif u"-tput" in test_name:
1138             domain += u"-tput"
1139     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1140         domain = u"dpdk"
1141     elif u"memif" in test_name:
1142         domain = u"container_memif"
1143     elif u"srv6" in test_name:
1144         domain = u"srv6"
1145     elif u"vhost" in test_name:
1146         domain = u"vhost"
1147         if u"vppl2xc" in test_name:
1148             driver += u"-vpp"
1149         else:
1150             driver += u"-testpmd"
1151         if u"lbvpplacp" in test_name:
1152             bsf += u"-link-bonding"
1153     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1154         domain = u"nf_service_density_vnfc"
1155     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1156         domain = u"nf_service_density_cnfc"
1157     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1158         domain = u"nf_service_density_cnfp"
1159     elif u"ipsec" in test_name:
1160         domain = u"ipsec"
1161         if u"sw" in test_name:
1162             bsf += u"-sw"
1163         elif u"hw" in test_name:
1164             bsf += u"-hw"
1165     elif u"ethip4vxlan" in test_name:
1166         domain = u"ip4_tunnels"
1167     elif u"ethip4udpgeneve" in test_name:
1168         domain = u"ip4_tunnels"
1169     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1170         domain = u"ip4"
1171     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1172         domain = u"ip6"
1173     elif u"l2xcbase" in test_name or \
1174             u"l2xcscale" in test_name or \
1175             u"l2bdbasemaclrn" in test_name or \
1176             u"l2bdscale" in test_name or \
1177             u"l2patch" in test_name:
1178         domain = u"l2"
1179     else:
1180         domain = u""
1181
1182     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1183     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1184
1185     return file_name + anchor_name
1186
1187
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root element of the generated HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Data rows; the color pair is chosen by the anomaly classification,
    # the member of the pair alternates with the row number:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            shade = u"regression"
        elif int(row[5]):
            shade = u"progression"
        else:
            shade = u"normal"
        tbl_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[shade][row_nr % 2])
        )

        # Cells:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                tbl_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and table.get(u"add-links", True):
                # The test name links to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1304
1305
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            # Count passed/failed tests and collect the names of the
            # failing ones (prefixed with the NIC).
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    failed += 1
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if nic_match:
                        failed_tests.append(
                            f"{nic_match.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    passed += 1
            tbl_list.extend((build, version, str(passed), str(failed)))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for line in tbl_list:
            file_handler.write(f"{line}\n")
1366
1367
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    test_type = u"NDRPDR" if u"NDRPDR" in table.get(u"filter", list()) \
        else u"MRR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Collect, per test case, the status of every build generated within
    # the configured time window.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not nic_match:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{nic_match.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Count the failures and remember the most recent failing entry.
        fails_nr = 0
        last_fail = None
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                last_fail = val
        if not fails_nr:
            continue
        max_fails = max(max_fails, fails_nr)
        tbl_lst.append([
            tst_data[u"name"],
            fails_nr,
            last_fail[1],
            last_fail[2],
            f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
            f"-build-{last_fail[3]}"
        ])

    # Most recent failure first, then grouped by the number of failures
    # in descending order (stable with respect to the date ordering).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1478
1479
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root element of the generated HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, caption in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Data rows with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(csv_lst[1:]):
        tbl_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        # Cells:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                tbl_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0 and table.get(u"add-links", True):
                # The test name links to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1577
1578
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    For each data column defined in the specification the mean and stdev
    of the selected tests are computed, a relative-change column is added
    for each valid comparison, and optional root cause analysis (RCA)
    notes are attached from yaml files. CSV, txt and html variants of the
    table are written to "output-file".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Collect the raw samples for every data column.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[u"throughput", u"result", u"name", u"parent", u"tags"],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    # A column may be restricted to tests carrying a tag.
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        # Optionally overwrite the collected samples with a replacement
        # data set (used when the primary data is known to be bad).
        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        # The first replacement hit for a test discards
                        # the samples collected from the primary data set.
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR"):
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Re-group per test name: tbl_dict[test][column title] = {mean, stdev}.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the comparisons and read the RCA files. NOTE: the previous
    # implementation popped invalid items from `comparisons` while
    # iterating it with enumerate(), which skips the element following
    # each removed one and can misalign `rcas`; build a filtered list
    # instead.
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        valid_comparisons = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comparisons.append(comp)
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comparisons
    else:
        # An empty list (instead of None) keeps the header-building loops
        # below safe no-ops instead of raising TypeError on iteration.
        comparisons = list()

    # Compute the relative changes; a row is kept only when every
    # comparison could be computed for it (the for-else idiom below).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    delta, d_stdev = relative_change_stdev(
                        ref_itm[u"mean"], cmp_itm[u"mean"],
                        ref_itm[u"stdev"], cmp_itm[u"stdev"]
                    )
                    if delta is None:
                        break
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                # Runs only when the inner loop did not break.
                tbl_cmp_lst.append(new_row)

    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Format the cells and track the widest value per column so the
    # numbers can be right-aligned in the txt/html tables. Guard against
    # an empty tbl_cmp_lst (previously raised IndexError).
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0]) if tbl_cmp_lst else list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Diff columns get an explicit +/- sign.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            # Right-align the stdev part to the widest value in the column.
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1942
1943
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Collects throughput results from up to "nr-of-data-columns" builds,
    appends a relative-change column for each comparison defined in the
    specification, and writes the table in csv, txt and html form to
    "output-file".

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR / PDR throughput results are supported by this table.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows (one physical csv line each). Per-build values are
    # inserted at index 1, i.e. right after the row label, so each newly
    # processed build becomes the left-most data column.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0  # Number of data columns collected so far.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Stop once the requested number of data columns is filled.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            # Translate the testbed IP to a display name, if a mapping
            # table is provided in the specification.
            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Results are keyed by negative column index (-1 for the
                # first collected build, -2 for the next, ...) to pair up
                # with the insert-at-1 header layout above.
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Result missing or malformed - leave the cell unset;
                    # it is rendered as "-" later.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute the relative change for each requested comparison and add
    # its header column (the three other header rows get empty cells).
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # Missing data propagates as NaN, rendered as "-" below.
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build the rows. Rows whose last comparison value is missing/NaN are
    # collected separately and appended at the end of the table.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Scale to millions (presumably pps -> Mpps - confirm),
                # rounded to one decimal place.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by test name, then by the last comparison column ascending.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Normalize all flavours of "no data" to "-" in the output.
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # NOTE(review): moves line 2 of the pretty txt below line 4 -
        # presumably relocating one header row under the separator line;
        # confirm against convert_csv_to_pretty_txt's output layout.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Collapse the four header rows into one html header per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )