Report: Add data
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
# Matches the NIC designation embedded in a suite/test name, e.g. "10ge2p1x520"
# (link speed, port count, NIC model). Used to strip the NIC part from test
# names (re.sub) and to extract it for display (re.search).
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table mapping the algorithm name used in the specification
    # file to the function implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        # An unknown algorithm raises KeyError (not NameError) from the dict
        # lookup, so check it explicitly; otherwise one bad specification
        # entry would abort the generation of all remaining tables.
        generate = generator.get(table[u"algorithm"], None)
        if generate is None:
            logging.error(
                f"Algorithm {table[u'algorithm']} is not defined."
            )
            continue
        try:
            generate(table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, writes an .rst file embedding raw HTML tables, one
    table per test, built from the test's operational (u"show-run") data.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    # Nothing to generate without test data.
    if data.empty:
        return
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    # Suites are needed to group the tests into per-suite output files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Table colors: header rows, empty spacer rows and the two
        # alternating body-row backgrounds.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test at all: emit a
        # single-cell placeholder table and stop.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers repeated for every thread section below.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, each with one sub-table per thread.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            # Host/socket identification of the DUT.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread title: thread 0 is "main", others are workers.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Column-header row.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                # Spacer row after each thread section.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing row: a white dot acts as an invisible bottom spacer.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Write one .rst file per suite, concatenating the HTML tables of all
    # tests whose parent matches the suite name.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
267
268
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the configured columns for every
    test belonging to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: CSV-quoted column titles.
    header = [
        u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        for column in table[u"columns"]
    ]

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                # The data key is the second word of the "data" field,
                # e.g. "data name" --> "name". Computed once per column
                # instead of once per comparison.
                col_key = column[u"data"].split(u" ")[1]
                try:
                    col_data = str(data[test][col_key]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if col_key in (u"name", ):
                        # Break overly long test names into two lines at
                        # roughly the middle dash.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"conf-history", u"show-run"):
                        # Drop the leading line break and the trailing one
                        # (the last 5 characters).
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    # The test is missing this piece of data.
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = f"{table[u'output-file']}_{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
348
349
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffixes, collapses thread/core designations to
    core-only form and finally removes the NIC designation.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # (old, new) pairs applied in order.
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in replacements:
        modified = modified.replace(old, new)

    return re.sub(REGEX_NIC, u"", modified)
373
374
375 def _tpc_modify_displayed_test_name(test_name):
376     """Modify a test name which is displayed in a table by replacing its parts.
377
378     :param test_name: Test name to be modified.
379     :type test_name: str
380     :returns: Modified test name.
381     :rtype: str
382     """
383     return test_name.\
384         replace(u"1t1c", u"1c").\
385         replace(u"2t1c", u"1c"). \
386         replace(u"2t2c", u"2c").\
387         replace(u"4t2c", u"2c"). \
388         replace(u"4t4c", u"4c").\
389         replace(u"8t4c", u"4c")
390
391
392 def _tpc_insert_data(target, src, include_tests):
393     """Insert src data to the target structure.
394
395     :param target: Target structure where the data is placed.
396     :param src: Source data to be placed into the target stucture.
397     :param include_tests: Which results will be included (MRR, NDR, PDR).
398     :type target: list
399     :type src: dict
400     :type include_tests: str
401     """
402     try:
403         if include_tests == u"MRR":
404             target.append(src[u"result"][u"receive-rate"])
405         elif include_tests == u"PDR":
406             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
407         elif include_tests == u"NDR":
408             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
409     except (KeyError, TypeError):
410         pass
411
412
413 def _tpc_sort_table(table):
414     """Sort the table this way:
415
416     1. Put "New in CSIT-XXXX" at the first place.
417     2. Put "See footnote" at the second place.
418     3. Sort the rest by "Delta".
419
420     :param table: Table to sort.
421     :type table: list
422     :returns: Sorted table.
423     :rtype: list
424     """
425
426
427     tbl_new = list()
428     tbl_see = list()
429     tbl_delta = list()
430     for item in table:
431         if isinstance(item[-1], str):
432             if u"New in CSIT" in item[-1]:
433                 tbl_new.append(item)
434             elif u"See footnote" in item[-1]:
435                 tbl_see.append(item)
436         else:
437             tbl_delta.append(item)
438
439     # Sort the tables:
440     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
441     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
442     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
443     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
444
445     # Put the tables together:
446     table = list()
447     table.extend(tbl_new)
448     table.extend(tbl_see)
449     table.extend(tbl_delta)
450
451     return table
452
453
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # Pre-sort one copy of the data per column and direction; the first
    # column (the test name) is always the secondary sort key. When the
    # key IS the first column, the ascending flags are inverted.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating body-row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One trace per pre-sorted variant; the dropdown below switches the
    # visible trace to emulate interactive sorting.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # One button per sorted variant, each making exactly one trace visible.
    # Button order matches the trace order (ascending first, then
    # descending).
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Preselect the last menu item (descending by the last
                # column).
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    # Write the figure as a standalone html file.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
545
546
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Builds a table comparing the "reference" builds with the "compare"
    builds (optionally preceded by historical columns) including the
    relative change ("Delta [%]"), and writes it as .csv, .txt and .html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Optional historical columns, two (mean, stdev) per history item.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps the normalized test name to its display name and the
    # lists of collected reference / compare results.
    tbl_dict = dict()
    # topo = ""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: NIC (from the parent suite) + test name
                    # without its last dash-separated part.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Optionally replace the reference data with results from alternative
    # builds; the first replaced test discards the originally collected
    # reference results.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect the "compare" results the same way as the reference ones.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement of the "compare" data, analogous to the
    # reference replacement above.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect the historical results; only for tests already present in
    # tbl_dict (others are skipped).
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the rows. Row layout matches the header:
    # name, [history mean, stdev]..., ref mean, ref stdev, cmp mean,
    # cmp stdev, delta. Rates are converted to Mpps (divided by 1e6).
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2] is the compare mean, item[-4] the reference mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            # Present only in the compare set --> new test.
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a tested compare mean.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # NOTE(review): footnote is never set to True here — the dot1q branch
    # above is commented out — so this block is currently inactive.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
808
809
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Same as table_perf_comparison, but every test is additionally filtered
    by a NIC tag: only tests whose tags contain the NIC named in
    table["reference"]["nic"] / table["compare"]["nic"] (and, for historical
    columns, item["nic"]) are included.

    Produces <output-file>.csv, <output-file>.txt and <output-file>.html
    with one row per test: optional history columns, reference mean/stdev,
    compare mean/stdev (all in Mpps) and the relative delta in percent.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # MRR results are receive rates; NDR/PDR results are throughputs.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Optional columns with data from older releases, two per item
        # (mean and stdev), placed before the reference/compare columns.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name to
    # {"name": display name, "ref-data": [...], "cmp-data": [...]}
    # and, later, an optional "history" OrderedDict per history title.
    tbl_dict = dict()
    # topo = u""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Keep only tests run on the reference NIC.
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: test name with the trailing (core count)
                    # component stripped.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data set for the reference column: results from
    # the replacement builds are added on top of (or instead of) the data
    # collected above.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): create_new_list is flipped after the
                    # first matching test only, so only that single test's
                    # ref-data list is cleared before replacement data is
                    # appended; all other tests keep their original samples.
                    # Looks intentional-but-fragile — confirm whether a
                    # per-test reset was meant.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect data for the "compare" column (filtered by the compare NIC).
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data set for the compare column; mirrors the
    # reference replacement logic above (including the single-reset
    # create_new_list behaviour).
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical results; only tests already present in tbl_dict
    # (i.e. seen in the reference data) get history columns.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No result of the requested kind for this test run.
                        pass

    # Build table rows. Rates are converted from pps to Mpps; missing data
    # is represented by the "Not tested" sentinel.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2] is the compare mean, item[-4] the reference mean.
        if item[-2] == u"Not tested":
            # No compare data -> no delta column; row is dropped below.
            pass
        elif item[-4] == u"Not tested":
            # Compare data present without reference data: new test.
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with usable compare data.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # NOTE(review): footnote can only become True via the commented-out
    # dot1q branch above, so this block is currently dead code.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1077
1078
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares the same tests run on two different NICs: results tagged with
    the reference NIC go to the "ref-data" column pair, results tagged with
    the compare NIC to the "cmp-data" pair. Writes csv, txt and html
    variants of the table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                entry = tbl_dict.get(tst_name_mod, None)
                if entry is None:
                    entry = {
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                    tbl_dict[tst_name_mod] = entry
                try:
                    # Pick the result matching the requested test kind.
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # Route the sample by NIC tag.
                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        entry[u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        entry[u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build rows: name, ref mean/stdev, cmp mean/stdev (Mpps), delta [%].
    tbl_lst = list()
    for tst_name in tbl_dict:
        row = [tbl_dict[tst_name][u"name"], ]
        for samples in (tbl_dict[tst_name][u"ref-data"],
                        tbl_dict[tst_name][u"cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        # Only rows with both means (and thus a delta) are kept.
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1189
1190
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (compare column) with the matching NDR/PDR/MRR
    results (reference column) and reports the relative change together
    with its standard deviation. Writes csv, txt and html variants.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] != u"SOAK":
                    continue
                tst_name_mod = tst_name.replace(u"-soak", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Prefix the display name with the NIC parsed from the
                    # parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    tbl_dict[tst_name_mod] = {
                        u"name": (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        ),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod][u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"])
                except (KeyError, TypeError):
                    pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    # Only tests with a SOAK counterpart are of interest.
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    mode = table[u"include-tests"]
                    if mode == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif mode == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif mode == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Build rows; only tests with both ref and cmp data produce a row.
    tbl_lst = list()
    for tst_name in tbl_dict:
        row = [tbl_dict[tst_name][u"name"], ]
        stats = list()
        for key in (u"ref-data", u"cmp-data"):
            samples = tbl_dict[tst_name][key]
            if samples:
                avg = mean(samples)
                dev = stdev(samples)
                row.append(round(avg / 1000000, 2))
                row.append(round(dev / 1000000, 2))
            else:
                avg = None
                dev = None
                row.extend([None, None])
            stats.append((avg, dev))
        (ref_avg, ref_dev), (cmp_avg, cmp_dev) = stats
        # Falsy (None or zero) means are skipped; a zero reference mean
        # would make the relative change undefined.
        if ref_avg and cmp_avg:
            delta, d_stdev = relative_change_stdev(
                ref_avg, cmp_avg, ref_dev, cmp_dev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1322
1323
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every MRR trending test, compute the latest trend value, the
    short-term and long-term relative changes, and the counts of
    regressions/progressions in the short-term window, then write the
    result as csv and pretty txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    # tbl_dict maps test name -> display name + per-build receive rates.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to classify a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # No valid (non-NaN) sample in the long-term window.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trend data. (A previous separate
            # "both changes are NaN" check was fully subsumed by this
            # condition and has been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            # Count anomalies only within the short-term window.
            recent_cls = classification_lst[-win_size:]
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 recent_cls.count(u"regression"),
                 recent_cls.count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable multi-key ordering: most regressions first, then most
    # progressions, then ascending short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1443
1444
1445 def _generate_url(testbed, test_name):
1446     """Generate URL to a trending plot from the name of the test case.
1447
1448     :param testbed: The testbed used for testing.
1449     :param test_name: The name of the test case.
1450     :type testbed: str
1451     :type test_name: str
1452     :returns: The URL to the plot with the trending data for the given test
1453         case.
1454     :rtype str
1455     """
1456
1457     if u"x520" in test_name:
1458         nic = u"x520"
1459     elif u"x710" in test_name:
1460         nic = u"x710"
1461     elif u"xl710" in test_name:
1462         nic = u"xl710"
1463     elif u"xxv710" in test_name:
1464         nic = u"xxv710"
1465     elif u"vic1227" in test_name:
1466         nic = u"vic1227"
1467     elif u"vic1385" in test_name:
1468         nic = u"vic1385"
1469     elif u"x553" in test_name:
1470         nic = u"x553"
1471     else:
1472         nic = u""
1473
1474     if u"64b" in test_name:
1475         frame_size = u"64b"
1476     elif u"78b" in test_name:
1477         frame_size = u"78b"
1478     elif u"imix" in test_name:
1479         frame_size = u"imix"
1480     elif u"9000b" in test_name:
1481         frame_size = u"9000b"
1482     elif u"1518b" in test_name:
1483         frame_size = u"1518b"
1484     elif u"114b" in test_name:
1485         frame_size = u"114b"
1486     else:
1487         frame_size = u""
1488
1489     if u"1t1c" in test_name or \
1490         (u"-1c-" in test_name and
1491          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1492         cores = u"1t1c"
1493     elif u"2t2c" in test_name or \
1494          (u"-2c-" in test_name and
1495           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1496         cores = u"2t2c"
1497     elif u"4t4c" in test_name or \
1498          (u"-4c-" in test_name and
1499           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1500         cores = u"4t4c"
1501     elif u"2t1c" in test_name or \
1502          (u"-1c-" in test_name and
1503           testbed in (u"2n-skx", u"3n-skx")):
1504         cores = u"2t1c"
1505     elif u"4t2c" in test_name:
1506         cores = u"4t2c"
1507     elif u"8t4c" in test_name:
1508         cores = u"8t4c"
1509     else:
1510         cores = u""
1511
1512     if u"testpmd" in test_name:
1513         driver = u"testpmd"
1514     elif u"l3fwd" in test_name:
1515         driver = u"l3fwd"
1516     elif u"avf" in test_name:
1517         driver = u"avf"
1518     elif u"dnv" in testbed or u"tsh" in testbed:
1519         driver = u"ixgbe"
1520     else:
1521         driver = u"i40e"
1522
1523     if u"acl" in test_name or \
1524             u"macip" in test_name or \
1525             u"nat" in test_name or \
1526             u"policer" in test_name or \
1527             u"cop" in test_name:
1528         bsf = u"features"
1529     elif u"scale" in test_name:
1530         bsf = u"scale"
1531     elif u"base" in test_name:
1532         bsf = u"base"
1533     else:
1534         bsf = u"base"
1535
1536     if u"114b" in test_name and u"vhost" in test_name:
1537         domain = u"vts"
1538     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1539         domain = u"dpdk"
1540     elif u"memif" in test_name:
1541         domain = u"container_memif"
1542     elif u"srv6" in test_name:
1543         domain = u"srv6"
1544     elif u"vhost" in test_name:
1545         domain = u"vhost"
1546         if u"vppl2xc" in test_name:
1547             driver += u"-vpp"
1548         else:
1549             driver += u"-testpmd"
1550         if u"lbvpplacp" in test_name:
1551             bsf += u"-link-bonding"
1552     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1553         domain = u"nf_service_density_vnfc"
1554     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1555         domain = u"nf_service_density_cnfc"
1556     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1557         domain = u"nf_service_density_cnfp"
1558     elif u"ipsec" in test_name:
1559         domain = u"ipsec"
1560         if u"sw" in test_name:
1561             bsf += u"-sw"
1562         elif u"hw" in test_name:
1563             bsf += u"-hw"
1564     elif u"ethip4vxlan" in test_name:
1565         domain = u"ip4_tunnels"
1566     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1567         domain = u"ip4"
1568     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1569         domain = u"ip6"
1570     elif u"l2xcbase" in test_name or \
1571             u"l2xcscale" in test_name or \
1572             u"l2bdbasemaclrn" in test_name or \
1573             u"l2bdscale" in test_name or \
1574             u"l2patch" in test_name:
1575         domain = u"l2"
1576     else:
1577         domain = u""
1578
1579     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1580     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1581
1582     return file_name + anchor_name
1583
1584
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification file.

    Reads the CSV dashboard given by table[u"input-file"] and writes it as
    an HTML table wrapped in an rST "raw" directive to table[u"output-file"].

    :param table: Table to generate.
    :param input_data: Data to process (not used by this algorithm).
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col, label in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = label

    # Background colour pairs (even / odd row) per row classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }

    for r_idx, row in enumerate(rows[1:]):
        # Columns 4 and 5 carry the numbers of regressions / progressions.
        if int(row[4]):
            classification = u"regression"
        elif int(row[5]):
            classification = u"progression"
        else:
            classification = u"normal"
        body_row = ET.SubElement(
            dashboard,
            u"tr",
            attrib=dict(bgcolor=colors[classification][r_idx % 2])
        )

        for c_idx, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            if c_idx == 0:
                # The first column links to the trending plot of the test.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1685
1686
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every build the output file lists the build number, the version
    from the build metadata, the numbers of passed and failed tests, and
    the names of the failed tests (prefixed by the NIC), one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                # Tests whose parent does not encode a NIC are counted as
                # failed but not listed by name.
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if nic_match:
                    failed_tests.append(
                        f"{nic_match.group(0)}-{tst_data[u'name']}"
                    )
            lines.append(build)
            lines.append(version)
            lines.append(str(passed))
            lines.append(str(failed))
            lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{item}\n" for item in lines)
1747
1748
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects, for every test seen within the configured time window
    (table[u"window"] days, default 7), the number of failures and the
    date / versions of the most recent failure, and writes the result as
    CSV plus a pretty txt rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only builds generated within the last "window" days are considered.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tst_name not in tbl_dict:
                    # Tests whose parent does not encode a NIC are skipped.
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not nic_match:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{nic_match.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if now - then <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for item in tbl_dict.values():
        # All FAIL entries in build order; the last one is the most recent.
        fails = [val for val in item[u"data"].values() if val[0] == u"FAIL"]
        if not fails:
            continue
        last_fail = fails[-1]
        max_fails = max(max_fails, len(fails))
        tbl_lst.append(
            [
                item[u"name"],
                len(fails),
                last_fail[1],
                last_fail[2],
                f"mrr-daily-build-{last_fail[3]}"
            ]
        )

    # Sort by last-failure date (newest first), then group by the number of
    # failures in descending order.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join(str(item) for item in test) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1856
1857
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and writes it as an HTML
    table wrapped in an rST "raw" directive to table[u"output-file"].

    :param table: Table to generate.
    :param input_data: Data to process (not used by this algorithm).
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col, label in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = label

    # Alternating row backgrounds:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(rows[1:]):
        body_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[r_idx % 2])
        )

        for c_idx, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            if c_idx == 0:
                # The first column links to the trending plot of the test.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return