Report: Add data
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
# Matches a NIC code embedded in test/parent names, e.g. u"10ge2p1x520"
# (speed, u"ge", port count, u"p", index, model letters/digits).
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table mapping algorithm names used in the specification file
    # to their implementations in this module.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # An unknown algorithm raises KeyError from the dict lookup
            # (the original code caught only NameError, so a misspelled
            # algorithm in the specification crashed the whole run instead
            # of being logged and skipped).
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one reStructuredText file per suite, each containing raw HTML
    tables with per-DUT, per-thread operational (show-run) data.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data: keep only the fields needed for the HTML tables.
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    # Suites drive the per-file grouping of the output below.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colours: header row, spacer rows, and the two
        # alternating body-row shades.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name, followed by an empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No show-run data captured for this test: emit a one-cell table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers repeated for each thread section below.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, each containing one sub-table per thread.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            # Host/socket identification row for this DUT.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread title: thread 0 is the VPP main thread, the rest
                # are workers.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Column-header row; first column left-aligned, the numeric
                # columns right-aligned.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colours.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats rendered with two decimals, everything else
                        # as-is.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                # Spacer row after each thread sub-table.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing spacer: a white dot to keep the row from collapsing.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output file per suite, concatenating the tables of all tests
    # whose parent name is contained in the suite name.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
267
268
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the configured columns for every
    test belonging to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: column titles, CSV-escaped.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The data key of this column, e.g. u"data name" ->
                    # u"name". Hoisted into a variable instead of
                    # re-splitting the same string in every branch below.
                    col_key = column[u"data"].split(u" ")[1]
                    col_data = str(data[test][col_key]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if col_key in (u"name", ):
                        # Break long test names roughly in the middle so
                        # they wrap nicely in the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"conf-history", u"show-run"):
                        # Drop the first line-break marker and the trailing
                        # five characters (the last u" |br| " fragment).
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = f"{table[u'output-file']}_{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
348
349
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffixes, normalizes thread/core tags to core
    counts, and removes the NIC code.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in replacements:
        modified = modified.replace(old, new)

    # Finally drop the NIC code from the name.
    return re.sub(REGEX_NIC, u"", modified)
373
374
375 def _tpc_modify_displayed_test_name(test_name):
376     """Modify a test name which is displayed in a table by replacing its parts.
377
378     :param test_name: Test name to be modified.
379     :type test_name: str
380     :returns: Modified test name.
381     :rtype: str
382     """
383     return test_name.\
384         replace(u"1t1c", u"1c").\
385         replace(u"2t1c", u"1c"). \
386         replace(u"2t2c", u"2c").\
387         replace(u"4t2c", u"2c"). \
388         replace(u"4t4c", u"4c").\
389         replace(u"8t4c", u"4c")
390
391
392 def _tpc_insert_data(target, src, include_tests):
393     """Insert src data to the target structure.
394
395     :param target: Target structure where the data is placed.
396     :param src: Source data to be placed into the target stucture.
397     :param include_tests: Which results will be included (MRR, NDR, PDR).
398     :type target: list
399     :type src: dict
400     :type include_tests: str
401     """
402     try:
403         if include_tests == u"MRR":
404             target.append(src[u"result"][u"receive-rate"])
405         elif include_tests == u"PDR":
406             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
407         elif include_tests == u"NDR":
408             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
409     except (KeyError, TypeError):
410         pass
411
412
413 def _tpc_sort_table(table):
414     """Sort the table this way:
415
416     1. Put "New in CSIT-XXXX" at the first place.
417     2. Put "See footnote" at the second place.
418     3. Sort the rest by "Delta".
419
420     :param table: Table to sort.
421     :type table: list
422     :returns: Sorted table.
423     :rtype: list
424     """
425
426
427     tbl_new = list()
428     tbl_see = list()
429     tbl_delta = list()
430     for item in table:
431         if isinstance(item[-1], str):
432             if u"New in CSIT" in item[-1]:
433                 tbl_new.append(item)
434             elif u"See footnote" in item[-1]:
435                 tbl_see.append(item)
436         else:
437             tbl_delta.append(item)
438
439     # Sort the tables:
440     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
441     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
442     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
443     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
444
445     # Put the tables together:
446     table = list()
447     table.extend(tbl_new)
448     table.extend(tbl_see)
449     table.extend(tbl_delta)
450
451     return table
452
453
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    frame = pd.DataFrame(data, columns=header)

    # One pre-sorted copy of the frame per (column, direction) combination:
    # all ascending variants first, then all descending ones. The first
    # column always acts as the secondary sort key, and its own primary
    # sort runs in the opposite direction.
    sorted_frames = list()
    for ascending in (True, False):
        for key in header:
            if key == header[0]:
                direction = [not ascending, True]
            else:
                direction = [ascending, True]
            sorted_frames.append(
                frame.sort_values(by=[key, header[0]], ascending=direction)
            )

    # Alternating row background colours.
    row_colors = [[
        u"#d4e4f7" if row % 2 else u"#e9f1fb" for row in range(len(frame))
    ]]
    hdr_spec = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One table trace per sorted variant; the dropdown below toggles which
    # single trace is visible.
    for variant in sorted_frames:
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=hdr_spec,
                cells=dict(
                    values=[variant.get(col) for col in header],
                    fill_color=row_colors,
                    align=[u"left", u"right"]
                )
            )
        )

    # Dropdown buttons, one per trace, in the same order as the traces.
    labels = (
        [f"<b>{itm}</b> (ascending)" for itm in header]
        + [f"<b>{itm}</b> (descending)" for itm in header]
    )
    buttons = list()
    for idx, label in enumerate(labels):
        visibility = [False, ] * len(labels)
        visibility[idx] = True
        buttons.append(
            dict(
                label=label.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visibility}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(labels) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
546
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Builds a CSV, a pretty TXT and an HTML table comparing the
    u"reference" and u"compare" data sets (optionally with historical
    columns), including the relative delta between them.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Optional historical columns, a (value, stdev) pair per item.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps the normalized test name to its display name and the
    # collected reference/compare (and optionally historical) samples.
    tbl_dict = dict()
    # topo = ""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Prefix the displayed name with the NIC code taken
                    # from the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Optional replacement data for the reference set. On the first match
    # the previously collected ref-data is discarded and replaced.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # NOTE(review): unlike the loop above, the name is
                        # built without the NIC prefix here — presumably
                        # intentional for replacement data; verify.
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect the compare data set, same structure as the reference loop.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the compare set.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical results; only tests already present in tbl_dict
    # get historical columns.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the table rows: name, [history mean/stdev ...], reference
    # mean/stdev, compare mean/stdev, delta. Values are scaled to Mpps.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2]: compare mean; item[-4]: reference mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            # Delta truncated to an integer percentage. Rows with a zero
            # reference mean get no delta and are dropped by the length
            # check below.
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # NOTE(review): footnote is never set to True while the u"See footnote"
    # branch above stays commented out, so this block is currently dead.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
808
809
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    This is the NIC-filtered variant of table_perf_comparison: only tests
    tagged with the NIC named in table[u"reference"][u"nic"] /
    table[u"compare"][u"nic"] (and item[u"nic"] for history columns) are
    included.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables.
    # Layout: test name, optional (mean, stdev) pairs for each history
    # release, (mean, stdev) for reference, (mean, stdev) for compare,
    # and the relative delta.
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # Collect reference results; only tests tagged with the reference NIC.
    tbl_dict = dict()
    # topo = u""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name drops the trailing (core/thread) part.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the reference column: results from
    # these builds override previously collected reference results.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): the flag is cleared after the first
                    # matching test, so only that single test's ref-data
                    # list is emptied; all other tests get replacement
                    # data appended on top of the original samples.
                    # Presumably every test's list should be reset once —
                    # verify intent before relying on this.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect compare results; only tests tagged with the compare NIC.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the compare column (same caveat on
    # create_new_list as for the reference replacement above).
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect history results; only for tests already present in tbl_dict
    # (tests with neither reference nor compare data are not added here).
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # Missing result in output.xml for this test/build.
                        pass

    # Assemble table rows; rates are converted from pps to Mpps and
    # rounded to two decimal places.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a compare stdev value.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # NOTE(review): footnote is never set True here (the only setter is
    # commented out above), so this block is currently dead code.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1077
1078
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; a missing specification parameter is fatal
    # for this table only, so log the error and bail out.
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        ref_title = table[u"reference"][u"title"]
        cmp_title = table[u"compare"][u"title"]
        header = [
            u"Test case",
            f"{ref_title} {hdr_param} [Mpps]",
            f"{ref_title} Stdev [Mpps]",
            f"{cmp_title} {hdr_param} [Mpps]",
            f"{cmp_title} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect the samples, keyed by the modified test name, split into
    # a reference-NIC bucket and a compare-NIC bucket.
    tbl_dict = dict()
    for job_name, build_nrs in table[u"data"].items():
        for build_nr in build_nrs:
            for test_id, test_row in data[job_name][str(build_nr)].items():
                key = _tpc_modify_test_name(test_id)
                if tbl_dict.get(key, None) is None:
                    # Displayed name drops the trailing (core/thread) part.
                    tbl_dict[key] = {
                        u"name": u"-".join(test_row[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    mode = table[u"include-tests"]
                    if mode == u"MRR":
                        result = test_row[u"result"][u"receive-rate"]
                    elif mode in (u"PDR", u"NDR"):
                        result = test_row[u"throughput"][mode][u"LOWER"]
                    else:
                        continue

                    if result and \
                            table[u"reference"][u"nic"] in test_row[u"tags"]:
                        tbl_dict[key][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in test_row[u"tags"]:
                        tbl_dict[key][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {test_id}\n{repr(err)}")
                    # No data in output.xml for this test

    # Turn the collected samples into rows:
    # name, ref mean, ref stdev, cmp mean, cmp stdev, delta [%].
    # Rates are converted from pps to Mpps, rounded to two decimals.
    tbl_lst = list()
    for key in tbl_dict:
        row = [tbl_dict[key][u"name"], ]
        for samples in (tbl_dict[key][u"ref-data"], tbl_dict[key][u"cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        ref_mean, cmp_mean = row[-4], row[-2]
        if ref_mean is not None and cmp_mean is not None and ref_mean != 0:
            row.append(int(relative_change(float(ref_mean), float(cmp_mean))))
        # Rows without a computed delta are incomplete and are dropped.
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for row in tbl_lst:
            file_handler.write(u",".join(str(field) for field in row) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1189
1190
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs each SOAK test result (compare column) with the matching
    NDR/PDR/MRR result (reference column) and reports the relative
    difference together with its standard deviation.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC extracted
                        # from the parent suite name; empty if no match.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # Missing throughput in output.xml for this test.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Normalize the test name so it matches the SOAK key.
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Assemble table rows; rates are converted from pps to Mpps and
    # rounded to two decimal places.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Truthiness check: rows with a missing (None) or zero mean on
        # either side get no delta and are excluded from the table.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1322
1323
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For each test the table lists the current trend [Mpps], the
    short-term and long-term relative change of the trend and the number
    of regressions and progressions detected in the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    # Collect receive-rate samples per test, keyed by build number.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # Tests without a recognizable NIC are skipped.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # At least two samples are needed to evaluate a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            # Maximum of the valid long-term averages preceding the
            # short-term window; nan when that range has no valid value.
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Rows with any nan metric are not displayed. (The previous
            # extra "both rel changes are nan" test was redundant: it was
            # fully covered by this or-condition.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            # Count anomalies only within the short-term window.
            last_window = classification_lst[-win_size:]
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 last_window.count(u"regression"),
                 last_window.count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable bucket sort: descending number of regressions, then
    # descending number of progressions, then ascending short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1443
1444
1445 def _generate_url(testbed, test_name):
1446     """Generate URL to a trending plot from the name of the test case.
1447
1448     :param testbed: The testbed used for testing.
1449     :param test_name: The name of the test case.
1450     :type testbed: str
1451     :type test_name: str
1452     :returns: The URL to the plot with the trending data for the given test
1453         case.
1454     :rtype str
1455     """
1456
1457     if u"x520" in test_name:
1458         nic = u"x520"
1459     elif u"x710" in test_name:
1460         nic = u"x710"
1461     elif u"xl710" in test_name:
1462         nic = u"xl710"
1463     elif u"xxv710" in test_name:
1464         nic = u"xxv710"
1465     elif u"vic1227" in test_name:
1466         nic = u"vic1227"
1467     elif u"vic1385" in test_name:
1468         nic = u"vic1385"
1469     elif u"x553" in test_name:
1470         nic = u"x553"
1471     elif u"cx556" in test_name or u"cx556a" in test_name:
1472         nic = u"cx556a"
1473     else:
1474         nic = u""
1475
1476     if u"64b" in test_name:
1477         frame_size = u"64b"
1478     elif u"78b" in test_name:
1479         frame_size = u"78b"
1480     elif u"imix" in test_name:
1481         frame_size = u"imix"
1482     elif u"9000b" in test_name:
1483         frame_size = u"9000b"
1484     elif u"1518b" in test_name:
1485         frame_size = u"1518b"
1486     elif u"114b" in test_name:
1487         frame_size = u"114b"
1488     else:
1489         frame_size = u""
1490
1491     if u"1t1c" in test_name or \
1492         (u"-1c-" in test_name and
1493          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1494         cores = u"1t1c"
1495     elif u"2t2c" in test_name or \
1496          (u"-2c-" in test_name and
1497           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1498         cores = u"2t2c"
1499     elif u"4t4c" in test_name or \
1500          (u"-4c-" in test_name and
1501           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1502         cores = u"4t4c"
1503     elif u"2t1c" in test_name or \
1504          (u"-1c-" in test_name and
1505           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1506         cores = u"2t1c"
1507     elif u"4t2c" in test_name or \
1508          (u"-2c-" in test_name and
1509           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1510         cores = u"4t2c"
1511     elif u"8t4c" in test_name or \
1512          (u"-4c-" in test_name and
1513           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1514         cores = u"8t4c"
1515     else:
1516         cores = u""
1517
1518     if u"testpmd" in test_name:
1519         driver = u"testpmd"
1520     elif u"l3fwd" in test_name:
1521         driver = u"l3fwd"
1522     elif u"avf" in test_name:
1523         driver = u"avf"
1524     elif u"rdma" in test_name:
1525         driver = u"rdma"
1526     elif u"dnv" in testbed or u"tsh" in testbed:
1527         driver = u"ixgbe"
1528     else:
1529         driver = u"i40e"
1530
1531     if u"acl" in test_name or \
1532             u"macip" in test_name or \
1533             u"nat" in test_name or \
1534             u"policer" in test_name or \
1535             u"cop" in test_name:
1536         bsf = u"features"
1537     elif u"scale" in test_name:
1538         bsf = u"scale"
1539     elif u"base" in test_name:
1540         bsf = u"base"
1541     else:
1542         bsf = u"base"
1543
1544     if u"114b" in test_name and u"vhost" in test_name:
1545         domain = u"vts"
1546     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1547         domain = u"dpdk"
1548     elif u"memif" in test_name:
1549         domain = u"container_memif"
1550     elif u"srv6" in test_name:
1551         domain = u"srv6"
1552     elif u"vhost" in test_name:
1553         domain = u"vhost"
1554         if u"vppl2xc" in test_name:
1555             driver += u"-vpp"
1556         else:
1557             driver += u"-testpmd"
1558         if u"lbvpplacp" in test_name:
1559             bsf += u"-link-bonding"
1560     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1561         domain = u"nf_service_density_vnfc"
1562     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1563         domain = u"nf_service_density_cnfc"
1564     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1565         domain = u"nf_service_density_cnfp"
1566     elif u"ipsec" in test_name:
1567         domain = u"ipsec"
1568         if u"sw" in test_name:
1569             bsf += u"-sw"
1570         elif u"hw" in test_name:
1571             bsf += u"-hw"
1572     elif u"ethip4vxlan" in test_name:
1573         domain = u"ip4_tunnels"
1574     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1575         domain = u"ip4"
1576     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1577         domain = u"ip6"
1578     elif u"l2xcbase" in test_name or \
1579             u"l2xcscale" in test_name or \
1580             u"l2bdbasemaclrn" in test_name or \
1581             u"l2bdscale" in test_name or \
1582             u"l2patch" in test_name:
1583         domain = u"l2"
1584     else:
1585         domain = u""
1586
1587     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1588     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1589
1590     return file_name + anchor_name
1591
1592
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, renders it as an html table
    with per-row coloring (regression / progression / normal) and writes it
    wrapped in an rST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The data is read from the CSV file, not from input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header - the first CSV row; the first column is left-aligned,
    # the others are centered:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: each classification has a (light, dark) pair of background
    # colors alternated on odd/even row index:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the number of regressions resp. progressions:
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name column is rendered as a link to the trending plot:
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            # ET.tostring() with encoding=u"unicode" already returns str,
            # so no extra str() wrapping is needed.
            html_file.write(ET.tostring(dashboard, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1693
1694
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every specified build, the output lists the build number, the tested
    version, the pass/fail counters and the names (prefixed by the NIC) of
    the failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            nr_passed = 0
            nr_failed = 0
            failed_names = list()
            for test in data[job][build].values:
                if test[u"status"] != u"FAIL":
                    nr_passed += 1
                    continue
                nr_failed += 1
                # Failed tests without a recognizable NIC in the parent name
                # are counted but not listed.
                found = re.search(REGEX_NIC, test[u"parent"])
                if not found:
                    continue
                failed_names.append(f"{found.group(0)}-{test[u'name']}")
            out_lines.extend(
                (build, version, str(nr_passed), str(nr_failed))
            )
            out_lines.extend(failed_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{line}\n" for line in out_lines)
1755
1756
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV (and a pretty-printed txt copy) of tests which failed at
    least once within the last ``window`` days, sorted by number of failures
    (descending) and, within the same count, by last failure time
    (descending).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Naive UTC timestamps on purpose: the u"generated" metadata is parsed
    # by strptime without a timezone below, so both sides of the subtraction
    # are naive and consistent.
    now = dt.utcnow()
    # Only builds generated within this many days are considered (default 7).
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent name does not contain a recognizable
                    # NIC are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # metadata() is queried twice per build: once for the
                    # generation timestamp, once (below) for the version.
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Count the failures and remember the data of the last (most recently
        # iterated) failing build.
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Two-stage ordering: first sort by last failure date (descending), then
    # regroup by failure count (descending). The grouping preserves the date
    # order within each group.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1864
1865
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV produced earlier, renders it as an html table
    with alternating row background and writes it wrapped in an rST
    ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The data is read from the CSV file, not from input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header - the first CSV row; the first column is left-aligned,
    # the others are centered:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows with alternating (light, dark) background:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name column is rendered as a link to the trending plot:
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            # ET.tostring() with encoding=u"unicode" already returns str,
            # so no extra str() wrapping is needed.
            html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return