25e827049ed604d175025c32af4c6b41e01ff8d6
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
# Matches a NIC designation embedded in test/suite names, e.g. "10ge2p1x520"
# or "25ge2p1xxv710" (link speed + "ge", port count "pN", model string).
# Used below both to strip the NIC part from test names and to extract it.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table specification to the generator function named by
    its ``algorithm`` key. A table with an unknown algorithm is logged and
    skipped; it does not abort the remaining tables.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # An unknown algorithm raises KeyError on the dict lookup (the
            # original code caught only NameError, so such a table aborted
            # the whole run instead of being reported and skipped).
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite; each file embeds raw HTML tables
    with the operational ("show-run") data of the suite's tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data: keep only the fields needed for the HTML tables.
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sort of tests by index, driven by the "sort" key of the spec.
    sort_tests = table.get(u"sort", None)
    if sort_tests and sort_tests in (u"ascending", u"descending"):
        args = dict(
            inplace=True,
            ascending=True if sort_tests == u"ascending" else False
        )
        data.sort_index(**args)

    # Suites are fetched separately; one output file is produced per suite.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Colors: blue header rows, white spacer rows, alternating body rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row (tab text keeps the cell from collapsing).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No show-run data captured for this test: emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers of the per-thread runtime statistics.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, then per thread within the DUT.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            # Host/socket identification row for this DUT.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread title row: thread 0 is VPP's main thread.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Header cells: first column left-aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing near-invisible row (white dot) acting as bottom padding.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to each suite and write
    # them into one .rst file per suite (raw HTML directive).
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): substring test — the test's parent name must be
            # contained in the suite name; verify this matches the data model.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
274
275
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Produces one CSV file per suite, one row per test, with the columns
    listed in the table specification.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sort of tests by index, driven by the "sort" key of the spec.
    sort_tests = table.get(u"sort", None)
    if sort_tests and sort_tests in (u"ascending", u"descending"):
        args = dict(
            inplace=True,
            ascending=True if sort_tests == u"ascending" else False
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double quotes are CSV-escaped.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The data key is the second word of e.g. "data name".
                    # Computed once per column (the original recomputed the
                    # split four times per cell).
                    col_key = column[u"data"].split(u" ")[1]
                    col_data = str(data[test][col_key]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if col_key in (u"name", ):
                        # Break long test names in half for readability.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"conf-history", u"show-run"):
                        # Drop the leading line break; the [:-5] strips the
                        # trailing " |br| " marker left by the data merge.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = f"{table[u'output-file']}_{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
358
359
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (ndr/pdr/mrr variants), normalizes the
    thread/core tag (e.g. "2t1c" -> "1c") and removes the NIC designation.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Ordered substitutions: longer test-type suffixes must be handled
    # before their shorter prefixes (e.g. "-ndrpdrdisc" before "-ndrpdr").
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)

    return re.sub(REGEX_NIC, u"", modified)
383
384
385 def _tpc_modify_displayed_test_name(test_name):
386     """Modify a test name which is displayed in a table by replacing its parts.
387
388     :param test_name: Test name to be modified.
389     :type test_name: str
390     :returns: Modified test name.
391     :rtype: str
392     """
393     return test_name.\
394         replace(u"1t1c", u"1c").\
395         replace(u"2t1c", u"1c"). \
396         replace(u"2t2c", u"2c").\
397         replace(u"4t2c", u"2c"). \
398         replace(u"4t4c", u"4c").\
399         replace(u"8t4c", u"4c")
400
401
402 def _tpc_insert_data(target, src, include_tests):
403     """Insert src data to the target structure.
404
405     :param target: Target structure where the data is placed.
406     :param src: Source data to be placed into the target stucture.
407     :param include_tests: Which results will be included (MRR, NDR, PDR).
408     :type target: list
409     :type src: dict
410     :type include_tests: str
411     """
412     try:
413         if include_tests == u"MRR":
414             target.append(src[u"result"][u"receive-rate"])
415         elif include_tests == u"PDR":
416             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
417         elif include_tests == u"NDR":
418             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
419     except (KeyError, TypeError):
420         pass
421
422
423 def _tpc_sort_table(table):
424     """Sort the table this way:
425
426     1. Put "New in CSIT-XXXX" at the first place.
427     2. Put "See footnote" at the second place.
428     3. Sort the rest by "Delta".
429
430     :param table: Table to sort.
431     :type table: list
432     :returns: Sorted table.
433     :rtype: list
434     """
435
436
437     tbl_new = list()
438     tbl_see = list()
439     tbl_delta = list()
440     for item in table:
441         if isinstance(item[-1], str):
442             if u"New in CSIT" in item[-1]:
443                 tbl_new.append(item)
444             elif u"See footnote" in item[-1]:
445                 tbl_see.append(item)
446         else:
447             tbl_delta.append(item)
448
449     # Sort the tables:
450     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
451     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
452     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
453     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
454
455     # Put the tables together:
456     table = list()
457     table.extend(tbl_new)
458     table.extend(tbl_see)
459     table.extend(tbl_delta)
460
461     return table
462
463
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted copy of the data per header column, ascending, then the
    # same set descending; secondary sort key is always the first column.
    # NOTE(review): for the first column the ascending flags are inverted
    # relative to the other columns — presumably deliberate, verify.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colors, shared by all traces.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One go.Table trace per pre-sorted DataFrame; the dropdown below toggles
    # which single trace is visible, so trace order must match button order.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Build the "Sort by" dropdown: one button per trace, each making
    # exactly its own trace visible.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Default selection: last item, i.e. first column descending.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    # Write the standalone html file (no plotly cloud link, no browser open).
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
555
556
557 def table_perf_comparison(table, input_data):
558     """Generate the table(s) with algorithm: table_perf_comparison
559     specified in the specification file.
560
561     :param table: Table to generate.
562     :param input_data: Data to process.
563     :type table: pandas.Series
564     :type input_data: InputData
565     """
566
567     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
568
569     # Transform the data
570     logging.info(
571         f"    Creating the data set for the {table.get(u'type', u'')} "
572         f"{table.get(u'title', u'')}."
573     )
574     data = input_data.filter_data(table, continue_on_error=True)
575
576     # Prepare the header of the tables
577     try:
578         header = [u"Test case", ]
579
580         if table[u"include-tests"] == u"MRR":
581             hdr_param = u"Rec Rate"
582         else:
583             hdr_param = u"Thput"
584
585         history = table.get(u"history", list())
586         for item in history:
587             header.extend(
588                 [
589                     f"{item[u'title']} {hdr_param} [Mpps]",
590                     f"{item[u'title']} Stdev [Mpps]"
591                 ]
592             )
593         header.extend(
594             [
595                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
596                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
597                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
598                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
599                 u"Delta [%]"
600             ]
601         )
602         header_str = u",".join(header) + u"\n"
603     except (AttributeError, KeyError) as err:
604         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
605         return
606
607     # Prepare data to the table:
608     tbl_dict = dict()
609     # topo = ""
610     for job, builds in table[u"reference"][u"data"].items():
611         # topo = u"2n-skx" if u"2n-skx" in job else u""
612         for build in builds:
613             for tst_name, tst_data in data[job][str(build)].items():
614                 tst_name_mod = _tpc_modify_test_name(tst_name)
615                 if u"across topologies" in table[u"title"].lower():
616                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
617                 if tbl_dict.get(tst_name_mod, None) is None:
618                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
619                     nic = groups.group(0) if groups else u""
620                     name = \
621                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
622                     if u"across testbeds" in table[u"title"].lower() or \
623                             u"across topologies" in table[u"title"].lower():
624                         name = _tpc_modify_displayed_test_name(name)
625                     tbl_dict[tst_name_mod] = {
626                         u"name": name,
627                         u"ref-data": list(),
628                         u"cmp-data": list()
629                     }
630                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
631                                  src=tst_data,
632                                  include_tests=table[u"include-tests"])
633
634     replacement = table[u"reference"].get(u"data-replacement", None)
635     if replacement:
636         create_new_list = True
637         rpl_data = input_data.filter_data(
638             table, data=replacement, continue_on_error=True)
639         for job, builds in replacement.items():
640             for build in builds:
641                 for tst_name, tst_data in rpl_data[job][str(build)].items():
642                     tst_name_mod = _tpc_modify_test_name(tst_name)
643                     if u"across topologies" in table[u"title"].lower():
644                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
645                     if tbl_dict.get(tst_name_mod, None) is None:
646                         name = \
647                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
648                         if u"across testbeds" in table[u"title"].lower() or \
649                                 u"across topologies" in table[u"title"].lower():
650                             name = _tpc_modify_displayed_test_name(name)
651                         tbl_dict[tst_name_mod] = {
652                             u"name": name,
653                             u"ref-data": list(),
654                             u"cmp-data": list()
655                         }
656                     if create_new_list:
657                         create_new_list = False
658                         tbl_dict[tst_name_mod][u"ref-data"] = list()
659
660                     _tpc_insert_data(
661                         target=tbl_dict[tst_name_mod][u"ref-data"],
662                         src=tst_data,
663                         include_tests=table[u"include-tests"]
664                     )
665
666     for job, builds in table[u"compare"][u"data"].items():
667         for build in builds:
668             for tst_name, tst_data in data[job][str(build)].items():
669                 tst_name_mod = _tpc_modify_test_name(tst_name)
670                 if u"across topologies" in table[u"title"].lower():
671                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
672                 if tbl_dict.get(tst_name_mod, None) is None:
673                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
674                     nic = groups.group(0) if groups else u""
675                     name = \
676                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
677                     if u"across testbeds" in table[u"title"].lower() or \
678                             u"across topologies" in table[u"title"].lower():
679                         name = _tpc_modify_displayed_test_name(name)
680                     tbl_dict[tst_name_mod] = {
681                         u"name": name,
682                         u"ref-data": list(),
683                         u"cmp-data": list()
684                     }
685                 _tpc_insert_data(
686                     target=tbl_dict[tst_name_mod][u"cmp-data"],
687                     src=tst_data,
688                     include_tests=table[u"include-tests"]
689                 )
690
691     replacement = table[u"compare"].get(u"data-replacement", None)
692     if replacement:
693         create_new_list = True
694         rpl_data = input_data.filter_data(
695             table, data=replacement, continue_on_error=True)
696         for job, builds in replacement.items():
697             for build in builds:
698                 for tst_name, tst_data in rpl_data[job][str(build)].items():
699                     tst_name_mod = _tpc_modify_test_name(tst_name)
700                     if u"across topologies" in table[u"title"].lower():
701                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
702                     if tbl_dict.get(tst_name_mod, None) is None:
703                         name = \
704                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
705                         if u"across testbeds" in table[u"title"].lower() or \
706                                 u"across topologies" in table[u"title"].lower():
707                             name = _tpc_modify_displayed_test_name(name)
708                         tbl_dict[tst_name_mod] = {
709                             u"name": name,
710                             u"ref-data": list(),
711                             u"cmp-data": list()
712                         }
713                     if create_new_list:
714                         create_new_list = False
715                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
716
717                     _tpc_insert_data(
718                         target=tbl_dict[tst_name_mod][u"cmp-data"],
719                         src=tst_data,
720                         include_tests=table[u"include-tests"]
721                     )
722
723     for item in history:
724         for job, builds in item[u"data"].items():
725             for build in builds:
726                 for tst_name, tst_data in data[job][str(build)].items():
727                     tst_name_mod = _tpc_modify_test_name(tst_name)
728                     if u"across topologies" in table[u"title"].lower():
729                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
730                     if tbl_dict.get(tst_name_mod, None) is None:
731                         continue
732                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
733                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
734                     if tbl_dict[tst_name_mod][u"history"].\
735                             get(item[u"title"], None) is None:
736                         tbl_dict[tst_name_mod][u"history"][item[
737                             u"title"]] = list()
738                     try:
739                         if table[u"include-tests"] == u"MRR":
740                             res = tst_data[u"result"][u"receive-rate"]
741                         elif table[u"include-tests"] == u"PDR":
742                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
743                         elif table[u"include-tests"] == u"NDR":
744                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
745                         else:
746                             continue
747                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
748                             append(res)
749                     except (TypeError, KeyError):
750                         pass
751
752     tbl_lst = list()
753     footnote = False
754     for tst_name in tbl_dict:
755         item = [tbl_dict[tst_name][u"name"], ]
756         if history:
757             if tbl_dict[tst_name].get(u"history", None) is not None:
758                 for hist_data in tbl_dict[tst_name][u"history"].values():
759                     if hist_data:
760                         item.append(round(mean(hist_data) / 1000000, 2))
761                         item.append(round(stdev(hist_data) / 1000000, 2))
762                     else:
763                         item.extend([u"Not tested", u"Not tested"])
764             else:
765                 item.extend([u"Not tested", u"Not tested"])
766         data_t = tbl_dict[tst_name][u"ref-data"]
767         if data_t:
768             item.append(round(mean(data_t) / 1000000, 2))
769             item.append(round(stdev(data_t) / 1000000, 2))
770         else:
771             item.extend([u"Not tested", u"Not tested"])
772         data_t = tbl_dict[tst_name][u"cmp-data"]
773         if data_t:
774             item.append(round(mean(data_t) / 1000000, 2))
775             item.append(round(stdev(data_t) / 1000000, 2))
776         else:
777             item.extend([u"Not tested", u"Not tested"])
778         if item[-2] == u"Not tested":
779             pass
780         elif item[-4] == u"Not tested":
781             item.append(u"New in CSIT-2001")
782         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
783         #     item.append(u"See footnote [1]")
784         #     footnote = True
785         elif item[-4] != 0:
786             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
787         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
788             tbl_lst.append(item)
789
790     tbl_lst = _tpc_sort_table(tbl_lst)
791
792     # Generate csv tables:
793     csv_file = f"{table[u'output-file']}.csv"
794     with open(csv_file, u"wt") as file_handler:
795         file_handler.write(header_str)
796         for test in tbl_lst:
797             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
798
799     txt_file_name = f"{table[u'output-file']}.txt"
800     convert_csv_to_pretty_txt(csv_file, txt_file_name)
801
802     if footnote:
803         with open(txt_file_name, u'a') as txt_file:
804             txt_file.writelines([
805                 u"\nFootnotes:\n",
806                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
807                 u"2-node testbeds, dot1q encapsulation is now used on both "
808                 u"links of SUT.\n",
809                 u"    Previously dot1q was used only on a single link with the "
810                 u"other link carrying untagged Ethernet frames. This changes "
811                 u"results\n",
812                 u"    in slightly lower throughput in CSIT-1908 for these "
813                 u"tests. See release notes."
814             ])
815
816     # Generate html table:
817     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
818
819
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Same flow as table_perf_comparison, but only tests whose tags contain
    the NIC named by the ``nic`` key of the ``reference`` / ``compare`` /
    ``history`` sections are taken into account.

    The result is one row per test: optional history columns (mean, stdev),
    reference mean and stdev, compare mean and stdev (all converted to Mpps)
    and the relative change in percent. The table is written as
    ``<output-file>.csv``, ``.txt`` and ``.html``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables.
    # Any missing specification key aborts the generation of this table.
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Optional: older releases to show before the reference/compare
        # columns; two columns (mean, stdev) per history item.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps the normalized test name to
    # {"name": displayed name, "ref-data": [...], "cmp-data": [...]}.
    tbl_dict = dict()
    # topo = u""
    # Pass 1: collect reference data, restricted to the reference NIC.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: test name without the trailing
                    # (core/thread) suffix.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Pass 2 (optional): replacement data for the reference column.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): create_new_list is flipped off after the
                    # first matching test, so only that first test's
                    # previously collected ref-data is discarded; for all
                    # later tests the replacement samples are appended to
                    # the data collected in pass 1 — confirm this is the
                    # intended replacement semantics.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Pass 3: collect compare data, restricted to the compare NIC.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Pass 4 (optional): replacement data for the compare column.
    # Same (first-match-only) list-reset behaviour as the reference
    # replacement pass above.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Pass 5: collect history data; only for tests already present in
    # tbl_dict (tests missing from reference/compare are skipped).
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No result for this test in this build; skip sample.
                        pass

    # Build the rows. After the two extends below the last four cells are
    # [ref mean, ref stdev, cmp mean, cmp stdev], i.e. item[-4] is the
    # reference mean and item[-2] the compare mean.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        # Convert pps -> Mpps.
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # Compare column missing: no delta is appended, so the row is
        # dropped by the length check below.
        if item[-2] == u"Not tested":
            pass
        # Reference missing but compare present: mark as a new test.
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        # Both present: append relative change in percent (guard against
        # division by a zero reference mean).
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a tested compare column.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Footnote is only emitted if set above (currently only by the
    # commented-out dot1q branch).
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1087
1088
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    For every test present on both NICs, the table shows mean and stdev of
    the measured rate (in Mpps) per NIC and the relative change in percent.
    The table is written as ``<output-file>.csv``, ``.txt`` and ``.html``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables; abort on a malformed specification.
    try:
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]

    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect the samples, keyed by normalized test name; each entry keeps
    # the displayed name plus the per-NIC sample lists.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    include = table[u"include-tests"]
                    if include == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif include == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif include == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # Route the sample by NIC tag; zero/None results are
                    # discarded.
                    if result:
                        if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build the rows: [name, ref mean, ref stdev, cmp mean, cmp stdev,
    # delta]; rows missing either side (no delta appended) are dropped by
    # the length check.
    tbl_lst = list()
    for tst_vals in tbl_dict.values():
        row = [tst_vals[u"name"], ]
        for samples in (tst_vals[u"ref-data"], tst_vals[u"cmp-data"]):
            if samples:
                # Convert pps -> Mpps.
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1199
1200
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares critical rates found by soak tests (compare column) with the
    NDR/PDR/MRR results of the corresponding non-soak tests (reference
    column) and reports the relative change and its stdev. The table is
    written as ``<output-file>.csv``, ``.txt`` and ``.html``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table; abort on a malformed specification.
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC extracted
                        # from the suite (parent) name.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No soak result for this test in this build.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests with a soak counterpart are of interest.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build the rows: [name, ref mean, ref stdev, cmp mean, cmp stdev,
    # delta, stdev of delta]; rows missing either side are dropped.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            # Convert pps -> Mpps.
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Truthiness check also guards against a zero reference mean
        # (division by zero in relative_change_stdev).
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change (the "Delta [%]"
    # column). Bug fix: the key used to be rel[-1], which is the
    # "Stdev of delta [%]" column, not the relative change.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1332
1333
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every MRR trending test the table shows the last trend value
    (in Mpps), the short- and long-term relative change of the trend, and
    the number of regressions/progressions within the short-term window.
    The table is written as csv (``output-file`` + ``output-file-ext``)
    and as pretty txt (``<output-file>.txt``).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table: per test, an ordered mapping
    # build -> receive-rate sample.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests without a recognizable NIC in the suite name
                    # are skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to talk about a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        # Window sizes are capped by the amount of available data.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum trend average in the long-term window (excluding the
        # short-term window); nan when no valid value exists there.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip rows with any nan value. (A previous extra check for
            # both relative changes being nan was redundant - it was fully
            # subsumed by this condition - and has been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable multi-key ordering: regressions desc, then progressions desc,
    # then short-term change asc (name order preserved by the sort above).
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1453
1454
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is composed of the file name (domain, testbed and NIC) and the
    anchor (frame size, number of cores, base/scale/features and driver),
    all derived from substrings of the test name.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    # NIC: the first matching substring wins; order preserves the original
    # precedence.
    nic = u""
    for nic_code in (u"x520", u"x710", u"xl710", u"xxv710",
                     u"vic1227", u"vic1385", u"x553"):
        if nic_code in test_name:
            nic = nic_code
            break

    # Frame size: the first matching substring wins.
    frame_size = u""
    for fsize in (u"64b", u"78b", u"imix", u"9000b", u"1518b", u"114b"):
        if fsize in test_name:
            frame_size = fsize
            break

    # Number of cores. On single-threaded testbeds the short u"-Nc-" tag
    # maps to the NtNc form; on skx testbeds u"-1c-" maps to 2t1c.
    single_tb = testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")
    if u"1t1c" in test_name or (u"-1c-" in test_name and single_tb):
        cores = u"1t1c"
    elif u"2t2c" in test_name or (u"-2c-" in test_name and single_tb):
        cores = u"2t2c"
    elif u"4t4c" in test_name or (u"-4c-" in test_name and single_tb):
        cores = u"4t4c"
    elif u"2t1c" in test_name or \
            (u"-1c-" in test_name and testbed in (u"2n-skx", u"3n-skx")):
        cores = u"2t1c"
    elif u"4t2c" in test_name:
        cores = u"4t2c"
    elif u"8t4c" in test_name:
        cores = u"8t4c"
    else:
        cores = u""

    # Driver: explicit driver tags first, then a testbed based default.
    if u"testpmd" in test_name:
        driver = u"testpmd"
    elif u"l3fwd" in test_name:
        driver = u"l3fwd"
    elif u"avf" in test_name:
        driver = u"avf"
    else:
        driver = u"ixgbe" if (u"dnv" in testbed or u"tsh" in testbed) \
            else u"dpdk"

    # Base / scale / features classification; anything not recognized as a
    # feature or scale test is treated as a base test.
    if any(feature in test_name
           for feature in (u"acl", u"macip", u"nat", u"policer", u"cop")):
        bsf = u"features"
    elif u"scale" in test_name:
        bsf = u"scale"
    else:
        bsf = u"base"

    # Test domain; some branches also refine the driver or bsf parts.
    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        driver += u"-vpp" if u"vppl2xc" in test_name else u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif all(tag in test_name for tag in (u"ch", u"vh", u"vm")):
        domain = u"nf_service_density_vnfc"
    elif all(tag in test_name for tag in (u"ch", u"mif", u"dcr")):
        domain = u"nf_service_density_cnfc"
    elif all(tag in test_name for tag in (u"pl", u"mif", u"dcr")):
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif any(tag in test_name for tag in
             (u"l2xcbase", u"l2xcscale", u"l2bdbasemaclrn",
              u"l2bdscale", u"l2patch")):
        domain = u"l2"
    else:
        domain = u""

    page_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return page_name + anchor_name
1593
1594
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the dashboard CSV file named in the table specification and
    renders it as an HTML table embedded in a reStructuredText raw block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The processed data set is not used here; the table is generated from
    # the input CSV file only.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    head_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(csv_lst[0]):
        head_cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        head_cell.text = caption

    # Light/dark background pairs per classification; consecutive rows
    # alternate between the two shades.
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }

    for row_nr, row in enumerate(csv_lst[1:]):
        # Column 4 carries the regression count, column 5 the progression
        # count; a non-zero value selects the row color.
        if int(row[4]):
            shades = colors[u"regression"]
        elif int(row[5]):
            shades = colors[u"progression"]
        else:
            shades = colors[u"normal"]
        data_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=shades[row_nr % 2])
        )

        for col_nr, item in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # First column: the test name, linked to its trending graph.
                anchor = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                anchor.text = item
            else:
                cell.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1695
1696
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each specified build, writes the build number, the version string,
    the pass/fail counts and the names of the failed tests (prefixed with
    the NIC) to the output file, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed, failed = 0, 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    failed += 1
                    # Tests whose parent name carries no recognizable NIC
                    # are counted as failed but not listed by name.
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if nic_match:
                        failed_tests.append(
                            f"{nic_match.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    passed += 1
            tbl_list.extend(
                [build, version, str(passed), str(failed)] + failed_tests
            )

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(line + u'\n' for line in tbl_list)
1757
1758
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Writes a CSV file (and a pretty-printed .txt version of it) listing
    every test that failed at least once within the configured time window,
    with the failure count and the details of the last recorded failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default: 7) are
    # taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent name does not contain a recognized
                    # NIC are skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Record (status, timestamp, VPP version, build id)
                        # for every build inside the window.
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    # Best-effort: a malformed build is logged and skipped,
                    # not fatal.
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count failures per test; the last FAIL seen while iterating the
    # per-build data wins as the "last failure" (assumes builds were
    # inserted in chronological order -- TODO confirm against the spec).
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by date of the last failure (newest first), then group the rows
    # by the number of failures in descending order.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1866
1867
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV file named in the table specification and renders it as
    an HTML table embedded in a reStructuredText raw block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The processed data set is not used here; the table is generated from
    # the input CSV file only.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    head_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, caption in enumerate(csv_lst[0]):
        head_cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        head_cell.text = caption

    # Data rows with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(csv_lst[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        for col_nr, item in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # First column: the test name, linked to its trending graph.
                anchor = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                anchor.text = item
            else:
                cell.text = item

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return