Report: Detailed test results
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
# Matches the NIC part of a test/suite name, e.g. u"10ge2p1x520".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names an algorithm; the matching generator
    function is called with the table specification and the input data.
    An unknown algorithm is logged and skipped, it does not abort the
    remaining tables.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Map of algorithm names (as used in the specification file) to the
    # functions implementing them.
    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        # Resolve the generator first: an unknown algorithm raises KeyError
        # here, which the original "except NameError" never caught and so
        # aborted the whole generation instead of skipping one table.
        try:
            table_gen = generator[table[u"algorithm"]]
        except KeyError:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined."
            )
            continue
        try:
            table_gen(table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
74
75
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For each suite, the show-run (operational) data of its tests is rendered
    as raw-HTML tables and written to a per-suite .rst file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: header rows, spacer rows, and the two
        # alternating body-row shades.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No show-run data was captured for this test: emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT; within it, one sub-table per thread.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is labelled "main", the others "worker_<nr>".
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Column headings; first column left-aligned, rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background color.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing white-on-white row acting as a bottom margin.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): substring match -- the test's parent name must be
            # contained in the suite name; confirm this matching is intended.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
268
269
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    For each suite, writes one CSV file with one row per test and one
    column per entry in table[u"columns"].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; quotes are doubled for CSV escaping.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The data key is the second word of the column's "data"
                    # spec, e.g. u"data name" -> u"name". Computed once here
                    # instead of re-splitting for every branch below.
                    data_key = column[u"data"].split(u" ")[1]
                    col_data = str(data[test][data_key]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if data_key in (u"name", ):
                        # Break overly long test names into two lines at the
                        # middle dash so they fit the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif data_key in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif data_key in (u"conf-history", u"show-run"):
                        # Drop the leading line break and the trailing one.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = f"{table[u'output-file']}_{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
349
350
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (e.g. -ndrpdr), normalises the
    threads/cores tag (e.g. 2t1c -> 1c) and removes the NIC name.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Applied in order; later pairs never re-match earlier replacements.
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in replacements:
        modified = modified.replace(old, new)

    return re.sub(REGEX_NIC, u"", modified)
374
375
376 def _tpc_modify_displayed_test_name(test_name):
377     """Modify a test name which is displayed in a table by replacing its parts.
378
379     :param test_name: Test name to be modified.
380     :type test_name: str
381     :returns: Modified test name.
382     :rtype: str
383     """
384     return test_name.\
385         replace(u"1t1c", u"1c").\
386         replace(u"2t1c", u"1c"). \
387         replace(u"2t2c", u"2c").\
388         replace(u"4t2c", u"2c"). \
389         replace(u"4t4c", u"4c").\
390         replace(u"8t4c", u"4c")
391
392
393 def _tpc_insert_data(target, src, include_tests):
394     """Insert src data to the target structure.
395
396     :param target: Target structure where the data is placed.
397     :param src: Source data to be placed into the target stucture.
398     :param include_tests: Which results will be included (MRR, NDR, PDR).
399     :type target: list
400     :type src: dict
401     :type include_tests: str
402     """
403     try:
404         if include_tests == u"MRR":
405             target.append(src[u"result"][u"receive-rate"])
406         elif include_tests == u"PDR":
407             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
408         elif include_tests == u"NDR":
409             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
410     except (KeyError, TypeError):
411         pass
412
413
414 def _tpc_sort_table(table):
415     """Sort the table this way:
416
417     1. Put "New in CSIT-XXXX" at the first place.
418     2. Put "See footnote" at the second place.
419     3. Sort the rest by "Delta".
420
421     :param table: Table to sort.
422     :type table: list
423     :returns: Sorted table.
424     :rtype: list
425     """
426
427
428     tbl_new = list()
429     tbl_see = list()
430     tbl_delta = list()
431     for item in table:
432         if isinstance(item[-1], str):
433             if u"New in CSIT" in item[-1]:
434                 tbl_new.append(item)
435             elif u"See footnote" in item[-1]:
436                 tbl_see.append(item)
437         else:
438             tbl_delta.append(item)
439
440     # Sort the tables:
441     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
442     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
443     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
444     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
445
446     # Put the tables together:
447     table = list()
448     table.extend(tbl_new)
449     table.extend(tbl_see)
450     table.extend(tbl_delta)
451
452     return table
453
454
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One sorted copy per column (ascending), then one per column
    # (descending); the first column is the secondary sort key.
    # NOTE(review): by precedence, "ascending" here is
    # ([True, True] if key != header[0] else [False, True]) -- i.e. when
    # sorting by the first column itself the primary direction flips;
    # confirm this asymmetry is intended.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row shades for the table body.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One Table trace per sorted variant; the dropdown built below toggles
    # which single trace is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # "Sort by" dropdown: one button per sorted variant, each showing
    # exactly one trace.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Pre-select the last menu item (last column, descending).
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
546
547
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Builds a per-test comparison of reference vs compare builds (plus
    optional historical columns) and writes it as CSV, pretty TXT and HTML.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps the normalised test name to its display name plus the
    # collected "ref-data" and "cmp-data" result lists.
    tbl_dict = dict()
    # topo = ""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name keeps the NIC prefix taken from the
                    # parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Optional replacement data for the reference column.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        # NOTE(review): only the FIRST matched test's
                        # ref-data is cleared (the flag drops immediately);
                        # confirm the replacement is not meant to clear
                        # every test it touches.
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect the compare-column results.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the compare column.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        # Same single-clear behavior as the reference
                        # replacement above.
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect the optional historical columns; tests not already present in
    # tbl_dict are skipped.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the rows: name, [history mean/stdev ...], ref mean/stdev,
    # cmp mean/stdev, delta. Rates are scaled to Mpps.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # "footnote" can only become True via the commented-out branch above, so
    # this block is currently inactive; kept for when the footnote returns.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
809
810
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    The table compares a "reference" data set against a "compare" data set,
    restricted to tests which carry the NIC tag configured for each data set.
    Optional extra columns hold results of historical releases and each data
    set can be (partially) replaced by a dedicated "data-replacement" set.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # MRR tests report a receive rate, NDR/PDR tests a throughput.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Two columns (mean, stdev) per configured historical release, ...
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        # ... then reference, compare and the relative-change columns.
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # Collect reference results, keyed by the normalised test name.
    tbl_dict = dict()
    # topo = u""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Only tests run on the configured reference NIC.
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name is the test name without its last
                    # dash-separated component.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally replace reference results by a dedicated data set.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): the flag is cleared after the very first
                    # matching test, so only that single test's ref-data is
                    # reset; every later test APPENDS to its original data.
                    # Verify this is the intended replacement semantics.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect compare results (only tests run on the compare NIC).
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally replace compare results by a dedicated data set.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): same single-reset behaviour as for the
                    # reference replacement above - only the first matching
                    # test's cmp-data is cleared; confirm intent.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical results for each configured history column.
    # Tests without an already-collected reference/compare entry are skipped.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No usable result for this test in this build.
                        pass

    # Build the table rows; the row layout matches the header:
    # name, [history mean/stdev ...], ref mean/stdev, cmp mean/stdev, delta.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # At this point item[-4]/item[-3] are the reference columns and
        # item[-2]/item[-1] the compare columns.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows which have a tested compare result.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # NOTE(review): footnote can currently never become True - the only
    # branch setting it is commented out above, so this text is never
    # appended.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1078
1079
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Results of the same tests are split into two columns according to the
    NIC they ran on (reference vs compare) and the relative change between
    the two is computed. Output is written as csv, pretty txt and html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; a missing specification item aborts generation.
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect result samples, split by the NIC the test ran on.
    tbl_dict = dict()
    test_type = table[u"include-tests"]
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    tbl_dict[tst_name_mod] = {
                        u"name": u"-".join(
                            tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if test_type == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif test_type in (u"PDR", u"NDR"):
                        result = tst_data[u"throughput"][test_type][u"LOWER"]
                    else:
                        continue

                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build the rows: name, ref mean/stdev, cmp mean/stdev, delta [%].
    tbl_lst = list()
    for tst_name in tbl_dict:
        row = [tbl_dict[tst_name][u"name"], ]
        for side in (u"ref-data", u"cmp-data"):
            samples = tbl_dict[tst_name][side]
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        # Delta only when both means exist and the reference is non-zero;
        # rows missing the delta are dropped by the length check below.
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(str(item) for item in test) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1190
1191
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    SOAK test results (compare set) are matched with the corresponding
    NDR/PDR/MRR results (reference set) and the relative change with its
    standard deviation is computed for each pair.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] != u"SOAK":
                    continue
                tst_name_mod = tst_name.replace(u"-soak", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Prefix the displayed name with the NIC parsed from
                    # the suite (parent) name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    tbl_dict[tst_name_mod] = {
                        u"name": (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        ),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod][u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"]
                    )
                except (KeyError, TypeError):
                    pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    test_type = table[u"include-tests"]
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = \
                    tst_name.replace(u"-ndrpdr", u"").replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if test_type == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif test_type in (u"PDR", u"NDR"):
                        result = tst_data[u"throughput"][test_type][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Build the rows: name, ref mean/stdev, cmp mean/stdev, delta, d_stdev.
    tbl_lst = list()
    for tst_name in tbl_dict:
        row = [tbl_dict[tst_name][u"name"], ]
        ref_samples = tbl_dict[tst_name][u"ref-data"]
        if ref_samples:
            ref_mean = mean(ref_samples)
            ref_stdev = stdev(ref_samples)
            row.append(round(ref_mean / 1000000, 2))
            row.append(round(ref_stdev / 1000000, 2))
        else:
            ref_mean = ref_stdev = None
            row.extend([None, None])
        cmp_samples = tbl_dict[tst_name][u"cmp-data"]
        if cmp_samples:
            cmp_mean = mean(cmp_samples)
            cmp_stdev = stdev(cmp_samples)
            row.append(round(cmp_mean / 1000000, 2))
            row.append(round(cmp_stdev / 1000000, 2))
        else:
            cmp_mean = cmp_stdev = None
            row.extend([None, None])
        # Only pairs with both (non-zero) means get the delta columns and
        # make it into the table.
        if ref_mean and cmp_mean:
            delta, d_stdev = relative_change_stdev(
                ref_mean, cmp_mean, ref_stdev, cmp_stdev
            )
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join(str(item) for item in test) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1323
1324
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every test found in the data, the trend (last anomaly-classification
    average), its short- and long-term relative changes, and the counts of
    regressions/progressions within the short-term window are computed. The
    result is written as csv and converted to pretty txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose suite name does not carry a recognisable
                    # NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to compute a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum of the long-term averages, excluding the short-term part;
        # an empty sequence (all NaN) yields NaN.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # A NaN anywhere means the trend is incomplete - skip the test.
            # (The former separate "both changes are NaN" pre-check was
            # fully subsumed by this condition, so it is not repeated.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda row: row[0])

    # Stable multi-pass sort: regressions descending, then progressions
    # descending, then short-term change ascending within each group.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [row for row in tbl_lst if row[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [row for row in tbl_reg if row[5] == nrp]
            tbl_out.sort(key=lambda row: row[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1444
1445
1446 def _generate_url(testbed, test_name):
1447     """Generate URL to a trending plot from the name of the test case.
1448
1449     :param testbed: The testbed used for testing.
1450     :param test_name: The name of the test case.
1451     :type testbed: str
1452     :type test_name: str
1453     :returns: The URL to the plot with the trending data for the given test
1454         case.
1455     :rtype str
1456     """
1457
1458     if u"x520" in test_name:
1459         nic = u"x520"
1460     elif u"x710" in test_name:
1461         nic = u"x710"
1462     elif u"xl710" in test_name:
1463         nic = u"xl710"
1464     elif u"xxv710" in test_name:
1465         nic = u"xxv710"
1466     elif u"vic1227" in test_name:
1467         nic = u"vic1227"
1468     elif u"vic1385" in test_name:
1469         nic = u"vic1385"
1470     elif u"x553" in test_name:
1471         nic = u"x553"
1472     else:
1473         nic = u""
1474
1475     if u"64b" in test_name:
1476         frame_size = u"64b"
1477     elif u"78b" in test_name:
1478         frame_size = u"78b"
1479     elif u"imix" in test_name:
1480         frame_size = u"imix"
1481     elif u"9000b" in test_name:
1482         frame_size = u"9000b"
1483     elif u"1518b" in test_name:
1484         frame_size = u"1518b"
1485     elif u"114b" in test_name:
1486         frame_size = u"114b"
1487     else:
1488         frame_size = u""
1489
1490     if u"1t1c" in test_name or \
1491         (u"-1c-" in test_name and
1492          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1493         cores = u"1t1c"
1494     elif u"2t2c" in test_name or \
1495          (u"-2c-" in test_name and
1496           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1497         cores = u"2t2c"
1498     elif u"4t4c" in test_name or \
1499          (u"-4c-" in test_name and
1500           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1501         cores = u"4t4c"
1502     elif u"2t1c" in test_name or \
1503          (u"-1c-" in test_name and
1504           testbed in (u"2n-skx", u"3n-skx")):
1505         cores = u"2t1c"
1506     elif u"4t2c" in test_name:
1507         cores = u"4t2c"
1508     elif u"8t4c" in test_name:
1509         cores = u"8t4c"
1510     else:
1511         cores = u""
1512
1513     if u"testpmd" in test_name:
1514         driver = u"testpmd"
1515     elif u"l3fwd" in test_name:
1516         driver = u"l3fwd"
1517     elif u"avf" in test_name:
1518         driver = u"avf"
1519     elif u"dnv" in testbed or u"tsh" in testbed:
1520         driver = u"ixgbe"
1521     else:
1522         driver = u"i40e"
1523
1524     if u"acl" in test_name or \
1525             u"macip" in test_name or \
1526             u"nat" in test_name or \
1527             u"policer" in test_name or \
1528             u"cop" in test_name:
1529         bsf = u"features"
1530     elif u"scale" in test_name:
1531         bsf = u"scale"
1532     elif u"base" in test_name:
1533         bsf = u"base"
1534     else:
1535         bsf = u"base"
1536
1537     if u"114b" in test_name and u"vhost" in test_name:
1538         domain = u"vts"
1539     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1540         domain = u"dpdk"
1541     elif u"memif" in test_name:
1542         domain = u"container_memif"
1543     elif u"srv6" in test_name:
1544         domain = u"srv6"
1545     elif u"vhost" in test_name:
1546         domain = u"vhost"
1547         if u"vppl2xc" in test_name:
1548             driver += u"-vpp"
1549         else:
1550             driver += u"-testpmd"
1551         if u"lbvpplacp" in test_name:
1552             bsf += u"-link-bonding"
1553     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1554         domain = u"nf_service_density_vnfc"
1555     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1556         domain = u"nf_service_density_cnfc"
1557     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1558         domain = u"nf_service_density_cnfp"
1559     elif u"ipsec" in test_name:
1560         domain = u"ipsec"
1561         if u"sw" in test_name:
1562             bsf += u"-sw"
1563         elif u"hw" in test_name:
1564             bsf += u"-hw"
1565     elif u"ethip4vxlan" in test_name:
1566         domain = u"ip4_tunnels"
1567     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1568         domain = u"ip4"
1569     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1570         domain = u"ip6"
1571     elif u"l2xcbase" in test_name or \
1572             u"l2xcscale" in test_name or \
1573             u"l2bdbasemaclrn" in test_name or \
1574             u"l2bdscale" in test_name or \
1575             u"l2patch" in test_name:
1576         domain = u"l2"
1577     else:
1578         domain = u""
1579
1580     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1581     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1582
1583     return file_name + anchor_name
1584
1585
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier and renders it as an HTML
    table with alternating row colors and a link to the trending plot in
    the first column of each row.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the pre-generated CSV dashboard.
    try:
        with open(table[u"input-file"], u'rt') as file_in:
            csv_rows = list(
                csv.reader(file_in, delimiter=u',', quotechar=u'"')
            )
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for idx, item in enumerate(csv_rows[0]):
        cell = ET.SubElement(
            header_row, u"th",
            attrib=dict(align=u"left" if idx == 0 else u"center")
        )
        cell.text = item

    # Light / dark background pairs per anomaly classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_rows[1:]):
        # Columns 4 and 5 hold the regression / progression counters.
        if int(row[4]):
            classification = u"regression"
        elif int(row[5]):
            classification = u"progression"
        else:
            classification = u"normal"
        row_el = ET.SubElement(
            dashboard, u"tr",
            attrib=dict(bgcolor=colors[classification][r_idx % 2])
        )

        for c_idx, item in enumerate(row):
            cell = ET.SubElement(
                row_el,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            if c_idx == 0:
                # The first column links the test name to its trending plot.
                anchor = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                anchor.text = item
            else:
                cell.text = item

    # Write the table as an rST "raw html" block.
    try:
        with open(table[u"output-file"], u'w') as file_out:
            logging.info(f"    Writing file: {table[u'output-file']}")
            file_out.write(u".. raw:: html\n\n\t")
            file_out.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            file_out.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1686
1687
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each specified build, writes the build id, VPP version, numbers of
    passed and failed tests and the list of failed test names (prefixed by
    their NIC) to the output file, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    # Failures without a recognizable NIC in the suite name
                    # are counted but not listed.
                    failed += 1
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if groups:
                        failed_tests.append(
                            f"{groups.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    passed += 1
            out_lines.extend([build, version, str(passed), str(failed)])
            out_lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(line + u'\n' for line in out_lines)
1748
1749
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects per-test failure statistics within the configured time window
    and writes them as a CSV table (plus a pretty-printed txt version),
    sorted by the number of failures (descending) and then by the time of
    the last failure (newest first).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Header of the csv table:
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only builds generated within this time window are considered:
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose suite has no recognizable NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{groups.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = \
                        input_data.metadata(job, build).get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if now - then <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(
                                u"version", u""
                            ),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        # Keep the data of the most recent failure (builds are in order):
        last_date, last_vpp, last_csit = u"", u"", u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                last_date, last_vpp, last_csit = val[1], val[2], val[3]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    last_date,
                    last_vpp,
                    f"mrr-daily-build-{last_csit}"
                ]
            )

    # Newest failures first, then bucket by descending failure count:
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend(item for item in tbl_lst if item[1] == nrf)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1857
1858
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV table of failed tests and renders it as an HTML table
    with alternating row colors and a link to the trending plot in the
    first column of each row.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the pre-generated CSV table.
    try:
        with open(table[u"input-file"], u'rt') as file_in:
            csv_rows = list(
                csv.reader(file_in, delimiter=u',', quotechar=u'"')
            )
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for idx, item in enumerate(csv_rows[0]):
        cell = ET.SubElement(
            header_row, u"th",
            attrib=dict(align=u"left" if idx == 0 else u"center")
        )
        cell.text = item

    # Alternating light / dark row backgrounds:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_rows[1:]):
        row_el = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[r_idx % 2])
        )

        for c_idx, item in enumerate(row):
            cell = ET.SubElement(
                row_el,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            if c_idx == 0:
                # The first column links the test name to its trending plot.
                anchor = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                anchor.text = item
            else:
                cell.text = item

    # Write the table as an rST "raw html" block.
    try:
        with open(table[u"output-file"], u'w') as file_out:
            logging.info(f"    Writing file: {table[u'output-file']}")
            file_out.write(u".. raw:: html\n\n\t")
            file_out.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            file_out.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return