1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
30 import pandas as pd
31
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
34
35 from pal_utils import mean, stdev, classify_anomalies, \
36     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
37
38
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
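# For example (hypothetical test name), REGEX_NIC extracts the NIC prefix:
#   REGEX_NIC.search(u"10ge2p1x710-ethip4-ip4base-ndrpdr").group(1)
#   returns u"10ge2p1x710".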
40
41
42 def generate_tables(spec, data):
43     """Generate all tables specified in the specification file.
44
45     :param spec: Specification read from the specification file.
46     :param data: Data to process.
47     :type spec: Specification
48     :type data: InputData
49     """
50
51     generator = {
52         u"table_merged_details": table_merged_details,
53         u"table_perf_comparison": table_perf_comparison,
54         u"table_perf_comparison_nic": table_perf_comparison_nic,
55         u"table_nics_comparison": table_nics_comparison,
56         u"table_soak_vs_ndr": table_soak_vs_ndr,
57         u"table_perf_trending_dash": table_perf_trending_dash,
58         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
59         u"table_last_failed_tests": table_last_failed_tests,
60         u"table_failed_tests": table_failed_tests,
61         u"table_failed_tests_html": table_failed_tests_html,
62         u"table_oper_data_html": table_oper_data_html,
63         u"table_comparison": table_comparison,
64         u"table_weekly_comparison": table_weekly_comparison
65     }
66
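    # Each item in spec.tables is expected to carry at least an u"algorithm"
    # key naming one of the generators above; a minimal, purely illustrative
    # entry might look like:
    #   {u"algorithm": u"table_failed_tests", u"title": u"...", ...}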
67     logging.info(u"Generating the tables ...")
68     for table in spec.tables:
69         try:
70             if table[u"algorithm"] == u"table_weekly_comparison":
71                 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
72             generator[table[u"algorithm"]](table, data)
73         except NameError as err:
74             logging.error(
75                 f"Probably algorithm {table[u'algorithm']} is not defined: "
76                 f"{repr(err)}"
77             )
78     logging.info(u"Done.")
79
80
81 def table_oper_data_html(table, input_data):
82     """Generate the table(s) with algorithm: table_oper_data_html
83     specified in the specification file.
84
85     :param table: Table to generate.
86     :param input_data: Data to process.
87     :type table: pandas.Series
88     :type input_data: InputData
89     """
90
91     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
92     # Transform the data
93     logging.info(
94         f"    Creating the data set for the {table.get(u'type', u'')} "
95         f"{table.get(u'title', u'')}."
96     )
97     data = input_data.filter_data(
98         table,
99         params=[u"name", u"parent", u"show-run", u"type"],
100         continue_on_error=True
101     )
102     if data.empty:
103         return
104     data = input_data.merge_data(data)
105
106     sort_tests = table.get(u"sort", None)
107     if sort_tests:
108         args = dict(
109             inplace=True,
110             ascending=(sort_tests == u"ascending")
111         )
112         data.sort_index(**args)
113
114     suites = input_data.filter_data(
115         table,
116         continue_on_error=True,
117         data_set=u"suites"
118     )
119     if suites.empty:
120         return
121     suites = input_data.merge_data(suites)
122
123     def _generate_html_table(tst_data):
124         """Generate an HTML table with operational data for the given test.
125
126         :param tst_data: Test data to be used to generate the table.
127         :type tst_data: pandas.Series
128         :returns: HTML table with operational data.
129         :rtype: str
130         """
131
132         colors = {
133             u"header": u"#7eade7",
134             u"empty": u"#ffffff",
135             u"body": (u"#e9f1fb", u"#d4e4f7")
136         }
137
138         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
139
140         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
141         thead = ET.SubElement(
142             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
143         )
144         thead.text = tst_data[u"name"]
145
146         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
147         thead = ET.SubElement(
148             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
149         )
150         thead.text = u"\t"
151
152         if tst_data.get(u"show-run", u"No Data") == u"No Data":
153             trow = ET.SubElement(
154                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
155             )
156             tcol = ET.SubElement(
157                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
158             )
159             tcol.text = u"No Data"
160
161             trow = ET.SubElement(
162                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
163             )
164             thead = ET.SubElement(
165                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
166             )
167             font = ET.SubElement(
168                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
169             )
170             font.text = u"."
171             return str(ET.tostring(tbl, encoding=u"unicode"))
172
173         tbl_hdr = (
174             u"Name",
175             u"Nr of Vectors",
176             u"Nr of Packets",
177             u"Suspends",
178             u"Cycles per Packet",
179             u"Average Vector Size"
180         )
181
182         for dut_data in tst_data[u"show-run"].values():
183             trow = ET.SubElement(
184                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
185             )
186             tcol = ET.SubElement(
187                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
188             )
189             if dut_data.get(u"threads", None) is None:
190                 tcol.text = u"No Data"
191                 continue
192
193             bold = ET.SubElement(tcol, u"b")
194             bold.text = (
195                 f"Host IP: {dut_data.get(u'host', '')}, "
196                 f"Socket: {dut_data.get(u'socket', '')}"
197             )
198             trow = ET.SubElement(
199                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
200             )
201             thead = ET.SubElement(
202                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
203             )
204             thead.text = u"\t"
205
206             for thread_nr, thread in dut_data[u"threads"].items():
207                 trow = ET.SubElement(
208                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
209                 )
210                 tcol = ET.SubElement(
211                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
212                 )
213                 bold = ET.SubElement(tcol, u"b")
214                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
215                 trow = ET.SubElement(
216                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
217                 )
218                 for idx, col in enumerate(tbl_hdr):
219                     tcol = ET.SubElement(
220                         trow, u"td",
221                         attrib=dict(align=u"right" if idx else u"left")
222                     )
223                     font = ET.SubElement(
224                         tcol, u"font", attrib=dict(size=u"2")
225                     )
226                     bold = ET.SubElement(font, u"b")
227                     bold.text = col
228                 for row_nr, row in enumerate(thread):
229                     trow = ET.SubElement(
230                         tbl, u"tr",
231                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
232                     )
233                     for idx, col in enumerate(row):
234                         tcol = ET.SubElement(
235                             trow, u"td",
236                             attrib=dict(align=u"right" if idx else u"left")
237                         )
238                         font = ET.SubElement(
239                             tcol, u"font", attrib=dict(size=u"2")
240                         )
241                         if isinstance(col, float):
242                             font.text = f"{col:.2f}"
243                         else:
244                             font.text = str(col)
245                 trow = ET.SubElement(
246                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
247                 )
248                 thead = ET.SubElement(
249                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
250                 )
251                 thead.text = u"\t"
252
253         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
254         thead = ET.SubElement(
255             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
256         )
257         font = ET.SubElement(
258             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
259         )
260         font.text = u"."
261
262         return str(ET.tostring(tbl, encoding=u"unicode"))
263
264     for suite in suites.values:
265         html_table = str()
266         for test_data in data.values:
267             if test_data[u"parent"] not in suite[u"name"]:
268                 continue
269             html_table += _generate_html_table(test_data)
270         if not html_table:
271             continue
272         try:
273             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
274             with open(f"{file_name}", u'w') as html_file:
275                 logging.info(f"    Writing file: {file_name}")
276                 html_file.write(u".. raw:: html\n\n\t")
277                 html_file.write(html_table)
278                 html_file.write(u"\n\t<p><br><br></p>\n")
279         except KeyError:
280             logging.warning(u"The output file is not defined.")
281             return
282     logging.info(u"  Done.")
283
284
285 def table_merged_details(table, input_data):
286     """Generate the table(s) with algorithm: table_merged_details
287     specified in the specification file.
288
289     :param table: Table to generate.
290     :param input_data: Data to process.
291     :type table: pandas.Series
292     :type input_data: InputData
293     """
294
295     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
296
297     # Transform the data
298     logging.info(
299         f"    Creating the data set for the {table.get(u'type', u'')} "
300         f"{table.get(u'title', u'')}."
301     )
302     data = input_data.filter_data(table, continue_on_error=True)
303     data = input_data.merge_data(data)
304
305     sort_tests = table.get(u"sort", None)
306     if sort_tests:
307         args = dict(
308             inplace=True,
309             ascending=(sort_tests == u"ascending")
310         )
311         data.sort_index(**args)
312
313     suites = input_data.filter_data(
314         table, continue_on_error=True, data_set=u"suites")
315     suites = input_data.merge_data(suites)
316
317     # Prepare the header of the tables
318     header = list()
319     for column in table[u"columns"]:
320         header.append(
321             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
322         )
323
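    # The u" |br| ", u" |prein| " and u" |preout| " markers below are
    # reStructuredText substitutions (see the definitions written in
    # _tpc_generate_html_table) used to preserve line breaks and preformatted
    # blocks when the generated tables are rendered.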
324     for suite in suites.values:
325         # Generate data
326         suite_name = suite[u"name"]
327         table_lst = list()
328         for test in data.keys():
329             if data[test][u"parent"] not in suite_name:
330                 continue
331             row_lst = list()
332             for column in table[u"columns"]:
333                 try:
334                     col_data = str(data[test][column[
335                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
336                     # Skip tests with "Test Failed" in the test message.
337                     if u"Test Failed" in col_data:
338                         continue
339                     col_data = col_data.replace(
340                         u"No Data", u"Not Captured     "
341                     )
342                     if column[u"data"].split(u" ")[1] in (u"name", ):
343                         if len(col_data) > 30:
344                             col_data_lst = col_data.split(u"-")
345                             half = int(len(col_data_lst) / 2)
346                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
347                                        f"- |br| " \
348                                        f"{u'-'.join(col_data_lst[half:])}"
349                         col_data = f" |prein| {col_data} |preout| "
350                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
351                         # Temporary solution: remove NDR results from message:
352                         if bool(table.get(u'remove-ndr', False)):
353                             try:
354                                 col_data = col_data.split(u" |br| ", 1)[1]
355                             except IndexError:
356                                 pass
357                         col_data = f" |prein| {col_data} |preout| "
358                     elif column[u"data"].split(u" ")[1] in \
359                             (u"conf-history", u"show-run"):
360                         col_data = col_data.replace(u" |br| ", u"", 1)
361                         col_data = f" |prein| {col_data[:-5]} |preout| "
362                     row_lst.append(f'"{col_data}"')
363                 except KeyError:
364                     row_lst.append(u'"Not captured"')
365             if len(row_lst) == len(table[u"columns"]):
366                 table_lst.append(row_lst)
367
368         # Write the data to file
369         if table_lst:
370             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
371             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
372             logging.info(f"      Writing file: {file_name}")
373             with open(file_name, u"wt") as file_handler:
374                 file_handler.write(u",".join(header) + u"\n")
375                 for item in table_lst:
376                     file_handler.write(u",".join(item) + u"\n")
377
378     logging.info(u"  Done.")
379
380
381 def _tpc_modify_test_name(test_name, ignore_nic=False):
382     """Modify a test name by replacing its parts.
383
384     :param test_name: Test name to be modified.
385     :param ignore_nic: If True, NIC is removed from TC name.
386     :type test_name: str
387     :type ignore_nic: bool
388     :returns: Modified test name.
389     :rtype: str
390     """
391     test_name_mod = test_name.\
392         replace(u"-ndrpdrdisc", u""). \
393         replace(u"-ndrpdr", u"").\
394         replace(u"-pdrdisc", u""). \
395         replace(u"-ndrdisc", u"").\
396         replace(u"-pdr", u""). \
397         replace(u"-ndr", u""). \
398         replace(u"1t1c", u"1c").\
399         replace(u"2t1c", u"1c"). \
400         replace(u"2t2c", u"2c").\
401         replace(u"4t2c", u"2c"). \
402         replace(u"4t4c", u"4c").\
403         replace(u"8t4c", u"4c")
404
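    # For example (hypothetical test name), the replacements above turn
    #   u"64b-2t1c-ethip4-ip4base-ndrpdr" into u"64b-1c-ethip4-ip4base";
    # with ignore_nic=True any NIC substring matched by REGEX_NIC
    # (e.g. u"10ge2p1x710") is removed below as well.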
405     if ignore_nic:
406         return re.sub(REGEX_NIC, u"", test_name_mod)
407     return test_name_mod
408
409
410 def _tpc_modify_displayed_test_name(test_name):
411     """Modify a test name which is displayed in a table by replacing its parts.
412
413     :param test_name: Test name to be modified.
414     :type test_name: str
415     :returns: Modified test name.
416     :rtype: str
417     """
418     return test_name.\
419         replace(u"1t1c", u"1c").\
420         replace(u"2t1c", u"1c"). \
421         replace(u"2t2c", u"2c").\
422         replace(u"4t2c", u"2c"). \
423         replace(u"4t4c", u"4c").\
424         replace(u"8t4c", u"4c")
425
426
427 def _tpc_insert_data(target, src, include_tests):
428     """Insert src data into the target structure.
429
430     :param target: Target structure where the data is placed.
431     :param src: Source data to be placed into the target structure.
432     :param include_tests: Which results will be included (MRR, NDR, PDR).
433     :type target: list
434     :type src: dict
435     :type include_tests: str
436     """
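    # Note on the resulting shape: for MRR each appended item is a
    # (receive-rate, receive-stdev) tuple, for NDR/PDR it is a single LOWER
    # throughput value, which is why the MRR branches elsewhere index [0][0]
    # and [0][1] while NDR/PDR lists go through mean()/stdev().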
437     try:
438         if include_tests == u"MRR":
439             target.append(
440                 (
441                     src[u"result"][u"receive-rate"],
442                     src[u"result"][u"receive-stdev"]
443                 )
444             )
445         elif include_tests == u"PDR":
446             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
447         elif include_tests == u"NDR":
448             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
449     except (KeyError, TypeError):
450         pass
451
452
453 def _tpc_sort_table(table):
454     """Sort the table as follows:
455
456     1. Rows marked "New in CSIT-XXXX" first (currently excluded, see below).
457     2. Rows marked "See footnote" next.
458     3. The remaining rows sorted by "Delta", in descending order.
459
460     :param table: Table to sort.
461     :type table: list
462     :returns: Sorted table.
463     :rtype: list
464     """
465
466     tbl_new = list()
467     tbl_see = list()
468     tbl_delta = list()
469     for item in table:
470         if isinstance(item[-1], str):
471             if u"New in CSIT" in item[-1]:
472                 tbl_new.append(item)
473             elif u"See footnote" in item[-1]:
474                 tbl_see.append(item)
475         else:
476             tbl_delta.append(item)
477
478     # Sort the tables:
479     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
480     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
481     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
482     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
483     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
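    # Note: thanks to Python's stable sort, the second sort on rel[-2] (the
    # delta) becomes the primary, descending key, while the preceding sort on
    # rel[0] (the first column) remains the ascending tie-breaker.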
484
485     # Put the tables together:
486     table = list()
487     # We do not want "New in CSIT":
488     # table.extend(tbl_new)
489     table.extend(tbl_see)
490     table.extend(tbl_delta)
491
492     return table
493
494
495 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
496                              footnote=u"", sort_data=True, title=u"",
497                              generate_rst=True):
498     """Generate an HTML table from input data with simple sorting support.
499
500     :param header: Table header.
501     :param data: Input data to be included in the table. It is a list of lists.
502         Inner lists are rows in the table. All inner lists must be of the same
503         length. The length of these lists must be the same as the length of the
504         header.
505     :param out_file_name: The name (relative or full path) of the file
506         where the generated HTML table is written.
507     :param legend: The legend to display below the table.
508     :param footnote: The footnote to display below the table (and legend).
509     :param sort_data: If True, the data sorting is enabled.
510     :param title: The table (and file) title.
511     :param generate_rst: If True, a wrapping RST file is generated.
512     :type header: list
513     :type data: list of lists
514     :type out_file_name: str
515     :type legend: str
516     :type footnote: str
517     :type sort_data: bool
518     :type title: str
519     :type generate_rst: bool
520     """
521
522     try:
523         idx = header.index(u"Test Case")
524     except ValueError:
525         idx = 0
526     params = {
527         u"align-hdr": (
528             [u"left", u"right"],
529             [u"left", u"left", u"right"],
530             [u"left", u"left", u"left", u"right"]
531         ),
532         u"align-itm": (
533             [u"left", u"right"],
534             [u"left", u"left", u"right"],
535             [u"left", u"left", u"left", u"right"]
536         ),
537         u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
538     }
539
540     df_data = pd.DataFrame(data, columns=header)
541
542     if sort_data:
543         df_sorted = [df_data.sort_values(
544             by=[key, header[idx]], ascending=[True, True]
545             if key != header[idx] else [False, True]) for key in header]
546         df_sorted_rev = [df_data.sort_values(
547             by=[key, header[idx]], ascending=[False, True]
548             if key != header[idx] else [True, True]) for key in header]
549         df_sorted.extend(df_sorted_rev)
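        # One pre-sorted copy of the data is kept per header column, first
        # ascending then descending; each copy becomes its own go.Table trace
        # below and the dropdown menu toggles which trace is visible.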
550     else:
551         df_sorted = df_data
552
553     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
554                    for idx in range(len(df_data))]]
555     table_header = dict(
556         values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
557         fill_color=u"#7eade7",
558         align=params[u"align-hdr"][idx],
559         font=dict(
560             family=u"Courier New",
561             size=12
562         )
563     )
564
565     fig = go.Figure()
566
567     if sort_data:
568         for table in df_sorted:
569             columns = [table.get(col) for col in header]
570             fig.add_trace(
571                 go.Table(
572                     columnwidth=params[u"width"][idx],
573                     header=table_header,
574                     cells=dict(
575                         values=columns,
576                         fill_color=fill_color,
577                         align=params[u"align-itm"][idx],
578                         font=dict(
579                             family=u"Courier New",
580                             size=12
581                         )
582                     )
583                 )
584             )
585
586         buttons = list()
587         menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
588         menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
589         menu_items.extend(menu_items_rev)
590         for idx, hdr in enumerate(menu_items):
591             visible = [False, ] * len(menu_items)
592             visible[idx] = True
593             buttons.append(
594                 dict(
595                     label=hdr.replace(u" [Mpps]", u""),
596                     method=u"update",
597                     args=[{u"visible": visible}],
598                 )
599             )
600
601         fig.update_layout(
602             updatemenus=[
603                 go.layout.Updatemenu(
604                     type=u"dropdown",
605                     direction=u"down",
606                     x=0.0,
607                     xanchor=u"left",
608                     y=1.002,
609                     yanchor=u"bottom",
610                     active=len(menu_items) - 1,
611                     buttons=list(buttons)
612                 )
613             ],
614         )
615     else:
616         fig.add_trace(
617             go.Table(
618                 columnwidth=params[u"width"][idx],
619                 header=table_header,
620                 cells=dict(
621                     values=[df_sorted.get(col) for col in header],
622                     fill_color=fill_color,
623                     align=params[u"align-itm"][idx],
624                     font=dict(
625                         family=u"Courier New",
626                         size=12
627                     )
628                 )
629             )
630         )
631
632     ploff.plot(
633         fig,
634         show_link=False,
635         auto_open=False,
636         filename=f"{out_file_name}_in.html"
637     )
638
639     if not generate_rst:
640         return
641
642     file_name = out_file_name.split(u"/")[-1]
643     if u"vpp" in out_file_name:
644         path = u"_tmp/src/vpp_performance_tests/comparisons/"
645     else:
646         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
647     with open(f"{path}{file_name}.rst", u"wt") as rst_file:
648         rst_file.write(
649             u"\n"
650             u".. |br| raw:: html\n\n    <br />\n\n\n"
651             u".. |prein| raw:: html\n\n    <pre>\n\n\n"
652             u".. |preout| raw:: html\n\n    </pre>\n\n"
653         )
654         if title:
655             rst_file.write(f"{title}\n")
656             rst_file.write(f"{u'`' * len(title)}\n\n")
657         rst_file.write(
658             u".. raw:: html\n\n"
659             f'    <iframe frameborder="0" scrolling="no" '
660             f'width="1600" height="1200" '
661             f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
662             f'</iframe>\n\n'
663         )
664         if legend:
665             rst_file.write(legend[1:].replace(u"\n", u" |br| "))
666         if footnote:
667             rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
668
669
670 def table_perf_comparison(table, input_data):
671     """Generate the table(s) with algorithm: table_perf_comparison
672     specified in the specification file.
673
674     :param table: Table to generate.
675     :param input_data: Data to process.
676     :type table: pandas.Series
677     :type input_data: InputData
678     """
679
680     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
681
682     # Transform the data
683     logging.info(
684         f"    Creating the data set for the {table.get(u'type', u'')} "
685         f"{table.get(u'title', u'')}."
686     )
687     data = input_data.filter_data(table, continue_on_error=True)
688
689     # Prepare the header of the tables
690     try:
691         header = [u"Test Case", ]
692         legend = u"\nLegend:\n"
693
694         rca_data = None
695         rca = table.get(u"rca", None)
696         if rca:
697             try:
698                 with open(rca.get(u"data-file", u""), u"r") as rca_file:
699                     rca_data = load(rca_file, Loader=FullLoader)
700                 header.insert(0, rca.get(u"title", u"RCA"))
701                 legend += (
702                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
703                 )
704             except (YAMLError, IOError) as err:
705                 logging.warning(repr(err))
706
707         history = table.get(u"history", list())
708         for item in history:
709             header.extend(
710                 [
711                     f"{item[u'title']} Avg({table[u'include-tests']})",
712                     f"{item[u'title']} Stdev({table[u'include-tests']})"
713                 ]
714             )
715             legend += (
716                 f"{item[u'title']} Avg({table[u'include-tests']}): "
717                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
718                 f"a series of runs of the listed tests executed against "
719                 f"{item[u'title']}.\n"
720                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
721                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
722                 f"computed from a series of runs of the listed tests executed "
723                 f"against {item[u'title']}.\n"
724             )
725         header.extend(
726             [
727                 f"{table[u'reference'][u'title']} "
728                 f"Avg({table[u'include-tests']})",
729                 f"{table[u'reference'][u'title']} "
730                 f"Stdev({table[u'include-tests']})",
731                 f"{table[u'compare'][u'title']} "
732                 f"Avg({table[u'include-tests']})",
733                 f"{table[u'compare'][u'title']} "
734                 f"Stdev({table[u'include-tests']})",
735                 f"Diff({table[u'reference'][u'title']},"
736                 f"{table[u'compare'][u'title']})",
737                 u"Stdev(Diff)"
738             ]
739         )
740         header_str = u";".join(header) + u"\n"
741         legend += (
742             f"{table[u'reference'][u'title']} "
743             f"Avg({table[u'include-tests']}): "
744             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
745             f"series of runs of the listed tests executed against "
746             f"{table[u'reference'][u'title']}.\n"
747             f"{table[u'reference'][u'title']} "
748             f"Stdev({table[u'include-tests']}): "
749             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
750             f"computed from a series of runs of the listed tests executed "
751             f"against {table[u'reference'][u'title']}.\n"
752             f"{table[u'compare'][u'title']} "
753             f"Avg({table[u'include-tests']}): "
754             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
755             f"series of runs of the listed tests executed against "
756             f"{table[u'compare'][u'title']}.\n"
757             f"{table[u'compare'][u'title']} "
758             f"Stdev({table[u'include-tests']}): "
759             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
760             f"computed from a series of runs of the listed tests executed "
761             f"against {table[u'compare'][u'title']}.\n"
762             f"Diff({table[u'reference'][u'title']},"
763             f"{table[u'compare'][u'title']}): "
764             f"Percentage change calculated for mean values.\n"
765             u"Stdev(Diff): "
766             u"Standard deviation of percentage change calculated for mean "
767             u"values.\n"
768             u"NT: Not Tested\n"
769         )
770     except (AttributeError, KeyError) as err:
771         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
772         return
773
774     # Prepare data to the table:
775     tbl_dict = dict()
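    # tbl_dict maps a normalised test name to a per-test dict of the form:
    #   {u"name": <displayed name>, u"replace-ref": bool, u"replace-cmp": bool,
    #    u"ref-data": [...], u"cmp-data": [...]}
    # with an u"history" OrderedDict added only when history is configured.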
776     for job, builds in table[u"reference"][u"data"].items():
777         for build in builds:
778             for tst_name, tst_data in data[job][str(build)].items():
779                 tst_name_mod = _tpc_modify_test_name(tst_name)
780                 if (u"across topologies" in table[u"title"].lower() or
781                         (u" 3n-" in table[u"title"].lower() and
782                          u" 2n-" in table[u"title"].lower())):
783                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
784                 if tbl_dict.get(tst_name_mod, None) is None:
785                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
786                     if u"across testbeds" in table[u"title"].lower() or \
787                             u"across topologies" in table[u"title"].lower():
788                         name = _tpc_modify_displayed_test_name(name)
789                     tbl_dict[tst_name_mod] = {
790                         u"name": name,
791                         u"replace-ref": True,
792                         u"replace-cmp": True,
793                         u"ref-data": list(),
794                         u"cmp-data": list()
795                     }
796                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
797                                  src=tst_data,
798                                  include_tests=table[u"include-tests"])
799
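    # Optional data replacement: when u"data-replacement" builds provide a
    # result for a test, the original reference data for that test are dropped
    # (via the u"replace-ref" flag) and only the replacement results are kept.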
800     replacement = table[u"reference"].get(u"data-replacement", None)
801     if replacement:
802         rpl_data = input_data.filter_data(
803             table, data=replacement, continue_on_error=True)
804         for job, builds in replacement.items():
805             for build in builds:
806                 for tst_name, tst_data in rpl_data[job][str(build)].items():
807                     tst_name_mod = _tpc_modify_test_name(tst_name)
808                     if (u"across topologies" in table[u"title"].lower() or
809                             (u" 3n-" in table[u"title"].lower() and
810                              u" 2n-" in table[u"title"].lower())):
811                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
812                     if tbl_dict.get(tst_name_mod, None) is None:
813                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
814                         if u"across testbeds" in table[u"title"].lower() or \
815                                 u"across topologies" in table[u"title"].lower():
816                             name = _tpc_modify_displayed_test_name(name)
817                         tbl_dict[tst_name_mod] = {
818                             u"name": name,
819                             u"replace-ref": False,
820                             u"replace-cmp": True,
821                             u"ref-data": list(),
822                             u"cmp-data": list()
823                         }
824                     if tbl_dict[tst_name_mod][u"replace-ref"]:
825                         tbl_dict[tst_name_mod][u"replace-ref"] = False
826                         tbl_dict[tst_name_mod][u"ref-data"] = list()
827
828                     _tpc_insert_data(
829                         target=tbl_dict[tst_name_mod][u"ref-data"],
830                         src=tst_data,
831                         include_tests=table[u"include-tests"]
832                     )
833
834     for job, builds in table[u"compare"][u"data"].items():
835         for build in builds:
836             for tst_name, tst_data in data[job][str(build)].items():
837                 tst_name_mod = _tpc_modify_test_name(tst_name)
838                 if (u"across topologies" in table[u"title"].lower() or
839                         (u" 3n-" in table[u"title"].lower() and
840                          u" 2n-" in table[u"title"].lower())):
841                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
842                 if tbl_dict.get(tst_name_mod, None) is None:
843                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
844                     if u"across testbeds" in table[u"title"].lower() or \
845                             u"across topologies" in table[u"title"].lower():
846                         name = _tpc_modify_displayed_test_name(name)
847                     tbl_dict[tst_name_mod] = {
848                         u"name": name,
849                         u"replace-ref": False,
850                         u"replace-cmp": True,
851                         u"ref-data": list(),
852                         u"cmp-data": list()
853                     }
854                 _tpc_insert_data(
855                     target=tbl_dict[tst_name_mod][u"cmp-data"],
856                     src=tst_data,
857                     include_tests=table[u"include-tests"]
858                 )
859
860     replacement = table[u"compare"].get(u"data-replacement", None)
861     if replacement:
862         rpl_data = input_data.filter_data(
863             table, data=replacement, continue_on_error=True)
864         for job, builds in replacement.items():
865             for build in builds:
866                 for tst_name, tst_data in rpl_data[job][str(build)].items():
867                     tst_name_mod = _tpc_modify_test_name(tst_name)
868                     if (u"across topologies" in table[u"title"].lower() or
869                             (u" 3n-" in table[u"title"].lower() and
870                              u" 2n-" in table[u"title"].lower())):
871                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
872                     if tbl_dict.get(tst_name_mod, None) is None:
873                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
874                         if u"across testbeds" in table[u"title"].lower() or \
875                                 u"across topologies" in table[u"title"].lower():
876                             name = _tpc_modify_displayed_test_name(name)
877                         tbl_dict[tst_name_mod] = {
878                             u"name": name,
879                             u"replace-ref": False,
880                             u"replace-cmp": False,
881                             u"ref-data": list(),
882                             u"cmp-data": list()
883                         }
884                     if tbl_dict[tst_name_mod][u"replace-cmp"]:
885                         tbl_dict[tst_name_mod][u"replace-cmp"] = False
886                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
887
888                     _tpc_insert_data(
889                         target=tbl_dict[tst_name_mod][u"cmp-data"],
890                         src=tst_data,
891                         include_tests=table[u"include-tests"]
892                     )
893
894     for item in history:
895         for job, builds in item[u"data"].items():
896             for build in builds:
897                 for tst_name, tst_data in data[job][str(build)].items():
898                     tst_name_mod = _tpc_modify_test_name(tst_name)
899                     if (u"across topologies" in table[u"title"].lower() or
900                             (u" 3n-" in table[u"title"].lower() and
901                              u" 2n-" in table[u"title"].lower())):
902                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
903                     if tbl_dict.get(tst_name_mod, None) is None:
904                         continue
905                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
906                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
907                     if tbl_dict[tst_name_mod][u"history"].\
908                             get(item[u"title"], None) is None:
909                         tbl_dict[tst_name_mod][u"history"][item[
910                             u"title"]] = list()
911                     try:
912                         if table[u"include-tests"] == u"MRR":
913                             res = (tst_data[u"result"][u"receive-rate"],
914                                    tst_data[u"result"][u"receive-stdev"])
915                         elif table[u"include-tests"] == u"PDR":
916                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
917                         elif table[u"include-tests"] == u"NDR":
918                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
919                         else:
920                             continue
921                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
922                             append(res)
923                     except (TypeError, KeyError):
924                         pass
925
926     tbl_lst = list()
927     for tst_name in tbl_dict:
928         item = [tbl_dict[tst_name][u"name"], ]
929         if history:
930             if tbl_dict[tst_name].get(u"history", None) is not None:
931                 for hist_data in tbl_dict[tst_name][u"history"].values():
932                     if hist_data:
933                         if table[u"include-tests"] == u"MRR":
934                             item.append(round(hist_data[0][0] / 1e6, 1))
935                             item.append(round(hist_data[0][1] / 1e6, 1))
936                         else:
937                             item.append(round(mean(hist_data) / 1e6, 1))
938                             item.append(round(stdev(hist_data) / 1e6, 1))
939                     else:
940                         item.extend([u"NT", u"NT"])
941             else:
942                 item.extend([u"NT", u"NT"])
943         data_r = tbl_dict[tst_name][u"ref-data"]
944         if data_r:
945             if table[u"include-tests"] == u"MRR":
946                 data_r_mean = data_r[0][0]
947                 data_r_stdev = data_r[0][1]
948             else:
949                 data_r_mean = mean(data_r)
950                 data_r_stdev = stdev(data_r)
951             item.append(round(data_r_mean / 1e6, 1))
952             item.append(round(data_r_stdev / 1e6, 1))
953         else:
954             data_r_mean = None
955             data_r_stdev = None
956             item.extend([u"NT", u"NT"])
957         data_c = tbl_dict[tst_name][u"cmp-data"]
958         if data_c:
959             if table[u"include-tests"] == u"MRR":
960                 data_c_mean = data_c[0][0]
961                 data_c_stdev = data_c[0][1]
962             else:
963                 data_c_mean = mean(data_c)
964                 data_c_stdev = stdev(data_c)
965             item.append(round(data_c_mean / 1e6, 1))
966             item.append(round(data_c_stdev / 1e6, 1))
967         else:
968             data_c_mean = None
969             data_c_stdev = None
970             item.extend([u"NT", u"NT"])
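        # At this point item[-2]/item[-1] hold the compare mean/stdev and
        # item[-4]/item[-3] the reference mean/stdev (or u"NT" when a side
        # was not tested); a missing reference marks the test as new in the
        # compared release.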
971         if item[-2] == u"NT":
972             pass
973         elif item[-4] == u"NT":
974             item.append(u"New in CSIT-2001")
975             item.append(u"New in CSIT-2001")
976         elif data_r_mean is not None and data_c_mean is not None:
977             delta, d_stdev = relative_change_stdev(
978                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
979             )
980             try:
981                 item.append(round(delta))
982             except ValueError:
983                 item.append(delta)
984             try:
985                 item.append(round(d_stdev))
986             except ValueError:
987                 item.append(d_stdev)
988         if rca_data:
989             rca_nr = rca_data.get(item[0], u"-")
990             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
991         if (len(item) == len(header)) and (item[-4] != u"NT"):
992             tbl_lst.append(item)
993
994     tbl_lst = _tpc_sort_table(tbl_lst)
995
996     # Generate csv tables:
997     csv_file = f"{table[u'output-file']}.csv"
998     with open(csv_file, u"wt") as file_handler:
999         file_handler.write(header_str)
1000         for test in tbl_lst:
1001             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1002
1003     txt_file_name = f"{table[u'output-file']}.txt"
1004     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1005
1006     footnote = u""
1007     with open(txt_file_name, u'a') as txt_file:
1008         txt_file.write(legend)
1009         if rca_data:
1010             footnote = rca_data.get(u"footnote", u"")
1011             if footnote:
1012                 txt_file.write(f"\n{footnote}")
1013         txt_file.write(u"\n:END")
1014
1015     # Generate html table:
1016     _tpc_generate_html_table(
1017         header,
1018         tbl_lst,
1019         table[u'output-file'],
1020         legend=legend,
1021         footnote=footnote,
1022         title=table.get(u"title", u"")
1023     )
1024
1025
1026 def table_perf_comparison_nic(table, input_data):
1027     """Generate the table(s) with algorithm: table_perf_comparison_nic
1028     specified in the specification file.
1029
1030     :param table: Table to generate.
1031     :param input_data: Data to process.
1032     :type table: pandas.Series
1033     :type input_data: InputData
1034     """
1035
1036     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1037
1038     # Transform the data
1039     logging.info(
1040         f"    Creating the data set for the {table.get(u'type', u'')} "
1041         f"{table.get(u'title', u'')}."
1042     )
1043     data = input_data.filter_data(table, continue_on_error=True)
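    # This variant mirrors table_perf_comparison, additionally requiring the
    # configured NIC tag in each test's u"tags" and stripping the NIC part
    # from test names (ignore_nic=True) so results from different NICs can be
    # matched against each other.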
1044
1045     # Prepare the header of the tables
1046     try:
1047         header = [u"Test Case", ]
1048         legend = u"\nLegend:\n"
1049
1050         rca_data = None
1051         rca = table.get(u"rca", None)
1052         if rca:
1053             try:
1054                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1055                     rca_data = load(rca_file, Loader=FullLoader)
1056                 header.insert(0, rca.get(u"title", "RCA"))
1057                 legend += (
1058                     u"RCA: Reference to the Root Cause Analysis, see below.\n"
1059                 )
1060             except (YAMLError, IOError) as err:
1061                 logging.warning(repr(err))
1062
1063         history = table.get(u"history", list())
1064         for item in history:
1065             header.extend(
1066                 [
1067                     f"{item[u'title']} Avg({table[u'include-tests']})",
1068                     f"{item[u'title']} Stdev({table[u'include-tests']})"
1069                 ]
1070             )
1071             legend += (
1072                 f"{item[u'title']} Avg({table[u'include-tests']}): "
1073                 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1074                 f"a series of runs of the listed tests executed against "
1075                 f"{item[u'title']}.\n"
1076                 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1077                 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1078                 f"computed from a series of runs of the listed tests executed "
1079                 f"against {item[u'title']}.\n"
1080             )
1081         header.extend(
1082             [
1083                 f"{table[u'reference'][u'title']} "
1084                 f"Avg({table[u'include-tests']})",
1085                 f"{table[u'reference'][u'title']} "
1086                 f"Stdev({table[u'include-tests']})",
1087                 f"{table[u'compare'][u'title']} "
1088                 f"Avg({table[u'include-tests']})",
1089                 f"{table[u'compare'][u'title']} "
1090                 f"Stdev({table[u'include-tests']})",
1091                 f"Diff({table[u'reference'][u'title']},"
1092                 f"{table[u'compare'][u'title']})",
1093                 u"Stdev(Diff)"
1094             ]
1095         )
1096         header_str = u";".join(header) + u"\n"
1097         legend += (
1098             f"{table[u'reference'][u'title']} "
1099             f"Avg({table[u'include-tests']}): "
1100             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1101             f"series of runs of the listed tests executed against "
1102             f"{table[u'reference'][u'title']}.\n"
1103             f"{table[u'reference'][u'title']} "
1104             f"Stdev({table[u'include-tests']}): "
1105             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1106             f"computed from a series of runs of the listed tests executed "
1107             f"against {table[u'reference'][u'title']}.\n"
1108             f"{table[u'compare'][u'title']} "
1109             f"Avg({table[u'include-tests']}): "
1110             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1111             f"series of runs of the listed tests executed against "
1112             f"{table[u'compare'][u'title']}.\n"
1113             f"{table[u'compare'][u'title']} "
1114             f"Stdev({table[u'include-tests']}): "
1115             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1116             f"computed from a series of runs of the listed tests executed "
1117             f"against {table[u'compare'][u'title']}.\n"
1118             f"Diff({table[u'reference'][u'title']},"
1119             f"{table[u'compare'][u'title']}): "
1120             f"Percentage change calculated for mean values.\n"
1121             u"Stdev(Diff): "
1122             u"Standard deviation of percentage change calculated for mean "
1123             u"values.\n"
1124             u"NT: Not Tested\n"
1125         )
1126     except (AttributeError, KeyError) as err:
1127         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1128         return
1129
1130     # Prepare data to the table:
1131     tbl_dict = dict()
1132     for job, builds in table[u"reference"][u"data"].items():
1133         for build in builds:
1134             for tst_name, tst_data in data[job][str(build)].items():
1135                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1136                     continue
1137                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1138                 if (u"across topologies" in table[u"title"].lower() or
1139                         (u" 3n-" in table[u"title"].lower() and
1140                          u" 2n-" in table[u"title"].lower())):
1141                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1142                 if tbl_dict.get(tst_name_mod, None) is None:
1143                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1144                     if u"across testbeds" in table[u"title"].lower() or \
1145                             u"across topologies" in table[u"title"].lower():
1146                         name = _tpc_modify_displayed_test_name(name)
1147                     tbl_dict[tst_name_mod] = {
1148                         u"name": name,
1149                         u"replace-ref": True,
1150                         u"replace-cmp": True,
1151                         u"ref-data": list(),
1152                         u"cmp-data": list()
1153                     }
1154                 _tpc_insert_data(
1155                     target=tbl_dict[tst_name_mod][u"ref-data"],
1156                     src=tst_data,
1157                     include_tests=table[u"include-tests"]
1158                 )
1159
1160     replacement = table[u"reference"].get(u"data-replacement", None)
1161     if replacement:
1162         rpl_data = input_data.filter_data(
1163             table, data=replacement, continue_on_error=True)
1164         for job, builds in replacement.items():
1165             for build in builds:
1166                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1167                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1168                         continue
1169                     tst_name_mod = \
1170                         _tpc_modify_test_name(tst_name, ignore_nic=True)
1171                     if (u"across topologies" in table[u"title"].lower() or
1172                             (u" 3n-" in table[u"title"].lower() and
1173                              u" 2n-" in table[u"title"].lower())):
1174                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1175                     if tbl_dict.get(tst_name_mod, None) is None:
1176                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1177                         if u"across testbeds" in table[u"title"].lower() or \
1178                                 u"across topologies" in table[u"title"].lower():
1179                             name = _tpc_modify_displayed_test_name(name)
1180                         tbl_dict[tst_name_mod] = {
1181                             u"name": name,
1182                             u"replace-ref": False,
1183                             u"replace-cmp": True,
1184                             u"ref-data": list(),
1185                             u"cmp-data": list()
1186                         }
1187                     if tbl_dict[tst_name_mod][u"replace-ref"]:
1188                         tbl_dict[tst_name_mod][u"replace-ref"] = False
1189                         tbl_dict[tst_name_mod][u"ref-data"] = list()
1190
1191                     _tpc_insert_data(
1192                         target=tbl_dict[tst_name_mod][u"ref-data"],
1193                         src=tst_data,
1194                         include_tests=table[u"include-tests"]
1195                     )
1196
1197     for job, builds in table[u"compare"][u"data"].items():
1198         for build in builds:
1199             for tst_name, tst_data in data[job][str(build)].items():
1200                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1201                     continue
1202                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1203                 if (u"across topologies" in table[u"title"].lower() or
1204                         (u" 3n-" in table[u"title"].lower() and
1205                          u" 2n-" in table[u"title"].lower())):
1206                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1207                 if tbl_dict.get(tst_name_mod, None) is None:
1208                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1209                     if u"across testbeds" in table[u"title"].lower() or \
1210                             u"across topologies" in table[u"title"].lower():
1211                         name = _tpc_modify_displayed_test_name(name)
1212                     tbl_dict[tst_name_mod] = {
1213                         u"name": name,
1214                         u"replace-ref": False,
1215                         u"replace-cmp": True,
1216                         u"ref-data": list(),
1217                         u"cmp-data": list()
1218                     }
1219                 _tpc_insert_data(
1220                     target=tbl_dict[tst_name_mod][u"cmp-data"],
1221                     src=tst_data,
1222                     include_tests=table[u"include-tests"]
1223                 )
1224
1225     replacement = table[u"compare"].get(u"data-replacement", None)
1226     if replacement:
1227         rpl_data = input_data.filter_data(
1228             table, data=replacement, continue_on_error=True)
1229         for job, builds in replacement.items():
1230             for build in builds:
1231                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1232                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1233                         continue
1234                     tst_name_mod = \
1235                         _tpc_modify_test_name(tst_name, ignore_nic=True)
1236                     if (u"across topologies" in table[u"title"].lower() or
1237                             (u" 3n-" in table[u"title"].lower() and
1238                              u" 2n-" in table[u"title"].lower())):
1239                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1240                     if tbl_dict.get(tst_name_mod, None) is None:
1241                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1242                         if u"across testbeds" in table[u"title"].lower() or \
1243                                 u"across topologies" in table[u"title"].lower():
1244                             name = _tpc_modify_displayed_test_name(name)
1245                         tbl_dict[tst_name_mod] = {
1246                             u"name": name,
1247                             u"replace-ref": False,
1248                             u"replace-cmp": False,
1249                             u"ref-data": list(),
1250                             u"cmp-data": list()
1251                         }
1252                     if tbl_dict[tst_name_mod][u"replace-cmp"]:
1253                         tbl_dict[tst_name_mod][u"replace-cmp"] = False
1254                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
1255
1256                     _tpc_insert_data(
1257                         target=tbl_dict[tst_name_mod][u"cmp-data"],
1258                         src=tst_data,
1259                         include_tests=table[u"include-tests"]
1260                     )
1261
1262     for item in history:
1263         for job, builds in item[u"data"].items():
1264             for build in builds:
1265                 for tst_name, tst_data in data[job][str(build)].items():
1266                     if item[u"nic"] not in tst_data[u"tags"]:
1267                         continue
1268                     tst_name_mod = \
1269                         _tpc_modify_test_name(tst_name, ignore_nic=True)
1270                     if (u"across topologies" in table[u"title"].lower() or
1271                             (u" 3n-" in table[u"title"].lower() and
1272                              u" 2n-" in table[u"title"].lower())):
1273                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1274                     if tbl_dict.get(tst_name_mod, None) is None:
1275                         continue
1276                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1277                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1278                     if tbl_dict[tst_name_mod][u"history"].\
1279                             get(item[u"title"], None) is None:
1280                         tbl_dict[tst_name_mod][u"history"][item[
1281                             u"title"]] = list()
1282                     try:
1283                         if table[u"include-tests"] == u"MRR":
1284                             res = (tst_data[u"result"][u"receive-rate"],
1285                                    tst_data[u"result"][u"receive-stdev"])
1286                         elif table[u"include-tests"] == u"PDR":
1287                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1288                         elif table[u"include-tests"] == u"NDR":
1289                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1290                         else:
1291                             continue
1292                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1293                             append(res)
1294                     except (TypeError, KeyError):
1295                         pass
1296
1297     tbl_lst = list()
1298     for tst_name in tbl_dict:
1299         item = [tbl_dict[tst_name][u"name"], ]
1300         if history:
1301             if tbl_dict[tst_name].get(u"history", None) is not None:
1302                 for hist_data in tbl_dict[tst_name][u"history"].values():
1303                     if hist_data:
1304                         if table[u"include-tests"] == u"MRR":
1305                             item.append(round(hist_data[0][0] / 1e6, 1))
1306                             item.append(round(hist_data[0][1] / 1e6, 1))
1307                         else:
1308                             item.append(round(mean(hist_data) / 1e6, 1))
1309                             item.append(round(stdev(hist_data) / 1e6, 1))
1310                     else:
1311                         item.extend([u"NT", u"NT"])
1312             else:
1313                 item.extend([u"NT", u"NT"])
1314         data_r = tbl_dict[tst_name][u"ref-data"]
1315         if data_r:
1316             if table[u"include-tests"] == u"MRR":
1317                 data_r_mean = data_r[0][0]
1318                 data_r_stdev = data_r[0][1]
1319             else:
1320                 data_r_mean = mean(data_r)
1321                 data_r_stdev = stdev(data_r)
1322             item.append(round(data_r_mean / 1e6, 1))
1323             item.append(round(data_r_stdev / 1e6, 1))
1324         else:
1325             data_r_mean = None
1326             data_r_stdev = None
1327             item.extend([u"NT", u"NT"])
1328         data_c = tbl_dict[tst_name][u"cmp-data"]
1329         if data_c:
1330             if table[u"include-tests"] == u"MRR":
1331                 data_c_mean = data_c[0][0]
1332                 data_c_stdev = data_c[0][1]
1333             else:
1334                 data_c_mean = mean(data_c)
1335                 data_c_stdev = stdev(data_c)
1336             item.append(round(data_c_mean / 1e6, 1))
1337             item.append(round(data_c_stdev / 1e6, 1))
1338         else:
1339             data_c_mean = None
1340             data_c_stdev = None
1341             item.extend([u"NT", u"NT"])
1342         if item[-2] == u"NT":
1343             pass
1344         elif item[-4] == u"NT":
1345             item.append(u"New in CSIT-2001")
1346             item.append(u"New in CSIT-2001")
1347         elif data_r_mean is not None and data_c_mean is not None:
1348             delta, d_stdev = relative_change_stdev(
1349                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1350             )
1351             try:
1352                 item.append(round(delta))
1353             except ValueError:
1354                 item.append(delta)
1355             try:
1356                 item.append(round(d_stdev))
1357             except ValueError:
1358                 item.append(d_stdev)
1359         if rca_data:
1360             rca_nr = rca_data.get(item[0], u"-")
1361             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1362         if (len(item) == len(header)) and (item[-4] != u"NT"):
1363             tbl_lst.append(item)
1364
1365     tbl_lst = _tpc_sort_table(tbl_lst)
1366
1367     # Generate csv tables:
1368     csv_file = f"{table[u'output-file']}.csv"
1369     with open(csv_file, u"wt") as file_handler:
1370         file_handler.write(header_str)
1371         for test in tbl_lst:
1372             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1373
1374     txt_file_name = f"{table[u'output-file']}.txt"
1375     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1376
1377     footnote = u""
1378     with open(txt_file_name, u'a') as txt_file:
1379         txt_file.write(legend)
1380         if rca_data:
1381             footnote = rca_data.get(u"footnote", u"")
1382             if footnote:
1383                 txt_file.write(f"\n{footnote}")
1384         txt_file.write(u"\n:END")
1385
1386     # Generate html table:
1387     _tpc_generate_html_table(
1388         header,
1389         tbl_lst,
1390         table[u'output-file'],
1391         legend=legend,
1392         footnote=footnote,
1393         title=table.get(u"title", u"")
1394     )
1395
1396
1397 def table_nics_comparison(table, input_data):
1398     """Generate the table(s) with algorithm: table_nics_comparison
1399     specified in the specification file.
1400
1401     :param table: Table to generate.
1402     :param input_data: Data to process.
1403     :type table: pandas.Series
1404     :type input_data: InputData
1405     """
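    # NOTE: An illustrative sketch of the specification entry this function
    # assumes (key names taken from the code below; the values are purely
    # hypothetical):
    #
    #   reference: {title: "x710",   nic: "NIC_Intel-X710"}
    #   compare:   {title: "xxv710", nic: "NIC_Intel-XXV710"}
    #   include-tests: "NDR"     # or "PDR" / "MRR"
    #   data: {<job name>: [<build numbers>]}
    #   output-file: <path without extension>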
1406
1407     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1408
1409     # Transform the data
1410     logging.info(
1411         f"    Creating the data set for the {table.get(u'type', u'')} "
1412         f"{table.get(u'title', u'')}."
1413     )
1414     data = input_data.filter_data(table, continue_on_error=True)
1415
1416     # Prepare the header of the tables
1417     try:
1418         header = [
1419             u"Test Case",
1420             f"{table[u'reference'][u'title']} "
1421             f"Avg({table[u'include-tests']})",
1422             f"{table[u'reference'][u'title']} "
1423             f"Stdev({table[u'include-tests']})",
1424             f"{table[u'compare'][u'title']} "
1425             f"Avg({table[u'include-tests']})",
1426             f"{table[u'compare'][u'title']} "
1427             f"Stdev({table[u'include-tests']})",
1428             f"Diff({table[u'reference'][u'title']},"
1429             f"{table[u'compare'][u'title']})",
1430             u"Stdev(Diff)"
1431         ]
1432         legend = (
1433             u"\nLegend:\n"
1434             f"{table[u'reference'][u'title']} "
1435             f"Avg({table[u'include-tests']}): "
1436             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1437             f"series of runs of the listed tests executed using "
1438             f"{table[u'reference'][u'title']} NIC.\n"
1439             f"{table[u'reference'][u'title']} "
1440             f"Stdev({table[u'include-tests']}): "
1441             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1442             f"computed from a series of runs of the listed tests executed "
1443             f"using {table[u'reference'][u'title']} NIC.\n"
1444             f"{table[u'compare'][u'title']} "
1445             f"Avg({table[u'include-tests']}): "
1446             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1447             f"series of runs of the listed tests executed using "
1448             f"{table[u'compare'][u'title']} NIC.\n"
1449             f"{table[u'compare'][u'title']} "
1450             f"Stdev({table[u'include-tests']}): "
1451             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1452             f"computed from a series of runs of the listed tests executed "
1453             f"using {table[u'compare'][u'title']} NIC.\n"
1454             f"Diff({table[u'reference'][u'title']},"
1455             f"{table[u'compare'][u'title']}): "
1456             f"Percentage change calculated for mean values.\n"
1457             u"Stdev(Diff): "
1458             u"Standard deviation of percentage change calculated for mean "
1459             u"values.\n"
1460             u":END"
1461         )
1462
1463     except (AttributeError, KeyError) as err:
1464         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1465         return
1466
1467     # Prepare data for the table:
1468     tbl_dict = dict()
1469     for job, builds in table[u"data"].items():
1470         for build in builds:
1471             for tst_name, tst_data in data[job][str(build)].items():
1472                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1473                 if tbl_dict.get(tst_name_mod, None) is None:
1474                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1475                     tbl_dict[tst_name_mod] = {
1476                         u"name": name,
1477                         u"ref-data": list(),
1478                         u"cmp-data": list()
1479                     }
1480                 try:
1481                     if table[u"include-tests"] == u"MRR":
1482                         result = (tst_data[u"result"][u"receive-rate"],
1483                                   tst_data[u"result"][u"receive-stdev"])
1484                     elif table[u"include-tests"] == u"PDR":
1485                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1486                     elif table[u"include-tests"] == u"NDR":
1487                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1488                     else:
1489                         continue
1490
1491                     if result and \
1492                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1493                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1494                     elif result and \
1495                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1496                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1497                 except (TypeError, KeyError) as err:
1498                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1499                     # No data in output.xml for this test
1500
1501     tbl_lst = list()
1502     for tst_name in tbl_dict:
1503         item = [tbl_dict[tst_name][u"name"], ]
1504         data_r = tbl_dict[tst_name][u"ref-data"]
1505         if data_r:
1506             if table[u"include-tests"] == u"MRR":
1507                 data_r_mean = data_r[0][0]
1508                 data_r_stdev = data_r[0][1]
1509             else:
1510                 data_r_mean = mean(data_r)
1511                 data_r_stdev = stdev(data_r)
1512             item.append(round(data_r_mean / 1e6, 1))
1513             item.append(round(data_r_stdev / 1e6, 1))
1514         else:
1515             data_r_mean = None
1516             data_r_stdev = None
1517             item.extend([None, None])
1518         data_c = tbl_dict[tst_name][u"cmp-data"]
1519         if data_c:
1520             if table[u"include-tests"] == u"MRR":
1521                 data_c_mean = data_c[0][0]
1522                 data_c_stdev = data_c[0][1]
1523             else:
1524                 data_c_mean = mean(data_c)
1525                 data_c_stdev = stdev(data_c)
1526             item.append(round(data_c_mean / 1e6, 1))
1527             item.append(round(data_c_stdev / 1e6, 1))
1528         else:
1529             data_c_mean = None
1530             data_c_stdev = None
1531             item.extend([None, None])
1532         if data_r_mean is not None and data_c_mean is not None:
1533             delta, d_stdev = relative_change_stdev(
1534                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1535             )
1536             try:
1537                 item.append(round(delta))
1538             except ValueError:
1539                 item.append(delta)
1540             try:
1541                 item.append(round(d_stdev))
1542             except ValueError:
1543                 item.append(d_stdev)
1544             tbl_lst.append(item)
1545
1546     # Sort the table according to the relative change
1547     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1548
1549     # Generate csv tables:
1550     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1551         file_handler.write(u";".join(header) + u"\n")
1552         for test in tbl_lst:
1553             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1554
1555     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1556                               f"{table[u'output-file']}.txt",
1557                               delimiter=u";")
1558
1559     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1560         txt_file.write(legend)
1561
1562     # Generate html table:
1563     _tpc_generate_html_table(
1564         header,
1565         tbl_lst,
1566         table[u'output-file'],
1567         legend=legend,
1568         title=table.get(u"title", u"")
1569     )
1570
1571
1572 def table_soak_vs_ndr(table, input_data):
1573     """Generate the table(s) with algorithm: table_soak_vs_ndr
1574     specified in the specification file.
1575
1576     :param table: Table to generate.
1577     :param input_data: Data to process.
1578     :type table: pandas.Series
1579     :type input_data: InputData
1580     """
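    # The table pairs each SOAK test (the "-soak" suffix is stripped from its
    # name) with its NDR/PDR/MRR counterpart (the "-ndrpdr"/"-mrr" suffix is
    # stripped), so both columns of a row refer to the same test case.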
1581
1582     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1583
1584     # Transform the data
1585     logging.info(
1586         f"    Creating the data set for the {table.get(u'type', u'')} "
1587         f"{table.get(u'title', u'')}."
1588     )
1589     data = input_data.filter_data(table, continue_on_error=True)
1590
1591     # Prepare the header of the table
1592     try:
1593         header = [
1594             u"Test Case",
1595             f"Avg({table[u'reference'][u'title']})",
1596             f"Stdev({table[u'reference'][u'title']})",
1597             f"Avg({table[u'compare'][u'title']})",
1598             f"Stdev({table[u'compare'][u'title']})",
1599             u"Diff",
1600             u"Stdev(Diff)"
1601         ]
1602         header_str = u";".join(header) + u"\n"
1603         legend = (
1604             u"\nLegend:\n"
1605             f"Avg({table[u'reference'][u'title']}): "
1606             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1607             f"from a series of runs of the listed tests.\n"
1608             f"Stdev({table[u'reference'][u'title']}): "
1609             f"Standard deviation value of {table[u'reference'][u'title']} "
1610             f"[Mpps] computed from a series of runs of the listed tests.\n"
1611             f"Avg({table[u'compare'][u'title']}): "
1612             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1613             f"a series of runs of the listed tests.\n"
1614             f"Stdev({table[u'compare'][u'title']}): "
1615             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1616             f"computed from a series of runs of the listed tests.\n"
1617             f"Diff({table[u'reference'][u'title']},"
1618             f"{table[u'compare'][u'title']}): "
1619             f"Percentage change calculated for mean values.\n"
1620             u"Stdev(Diff): "
1621             u"Standard deviation of percentage change calculated for mean "
1622             u"values.\n"
1623             u":END"
1624         )
1625     except (AttributeError, KeyError) as err:
1626         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1627         return
1628
1629     # Create a list of available SOAK test results:
1630     tbl_dict = dict()
1631     for job, builds in table[u"compare"][u"data"].items():
1632         for build in builds:
1633             for tst_name, tst_data in data[job][str(build)].items():
1634                 if tst_data[u"type"] == u"SOAK":
1635                     tst_name_mod = tst_name.replace(u"-soak", u"")
1636                     if tbl_dict.get(tst_name_mod, None) is None:
1637                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1638                         nic = groups.group(0) if groups else u""
1639                         name = (
1640                             f"{nic}-"
1641                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1642                         )
1643                         tbl_dict[tst_name_mod] = {
1644                             u"name": name,
1645                             u"ref-data": list(),
1646                             u"cmp-data": list()
1647                         }
1648                     try:
1649                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1650                             tst_data[u"throughput"][u"LOWER"])
1651                     except (KeyError, TypeError):
1652                         pass
1653     tests_lst = tbl_dict.keys()
1654
1655     # Add corresponding NDR test results:
1656     for job, builds in table[u"reference"][u"data"].items():
1657         for build in builds:
1658             for tst_name, tst_data in data[job][str(build)].items():
1659                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1660                     replace(u"-mrr", u"")
1661                 if tst_name_mod not in tests_lst:
1662                     continue
1663                 try:
1664                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1665                         continue
1666                     if table[u"include-tests"] == u"MRR":
1667                         result = (tst_data[u"result"][u"receive-rate"],
1668                                   tst_data[u"result"][u"receive-stdev"])
1669                     elif table[u"include-tests"] == u"PDR":
1670                         result = \
1671                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1672                     elif table[u"include-tests"] == u"NDR":
1673                         result = \
1674                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1675                     else:
1676                         result = None
1677                     if result is not None:
1678                         tbl_dict[tst_name_mod][u"ref-data"].append(
1679                             result)
1680                 except (KeyError, TypeError):
1681                     continue
1682
1683     tbl_lst = list()
1684     for tst_name in tbl_dict:
1685         item = [tbl_dict[tst_name][u"name"], ]
1686         data_r = tbl_dict[tst_name][u"ref-data"]
1687         if data_r:
1688             if table[u"include-tests"] == u"MRR":
1689                 data_r_mean = data_r[0][0]
1690                 data_r_stdev = data_r[0][1]
1691             else:
1692                 data_r_mean = mean(data_r)
1693                 data_r_stdev = stdev(data_r)
1694             item.append(round(data_r_mean / 1e6, 1))
1695             item.append(round(data_r_stdev / 1e6, 1))
1696         else:
1697             data_r_mean = None
1698             data_r_stdev = None
1699             item.extend([None, None])
1700         data_c = tbl_dict[tst_name][u"cmp-data"]
1701         if data_c:
1702             if table[u"include-tests"] == u"MRR":
1703                 data_c_mean = data_c[0][0]
1704                 data_c_stdev = data_c[0][1]
1705             else:
1706                 data_c_mean = mean(data_c)
1707                 data_c_stdev = stdev(data_c)
1708             item.append(round(data_c_mean / 1e6, 1))
1709             item.append(round(data_c_stdev / 1e6, 1))
1710         else:
1711             data_c_mean = None
1712             data_c_stdev = None
1713             item.extend([None, None])
1714         if data_r_mean is not None and data_c_mean is not None:
1715             delta, d_stdev = relative_change_stdev(
1716                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1717             try:
1718                 item.append(round(delta))
1719             except ValueError:
1720                 item.append(delta)
1721             try:
1722                 item.append(round(d_stdev))
1723             except ValueError:
1724                 item.append(d_stdev)
1725             tbl_lst.append(item)
1726
1727     # Sort the table according to the relative change
1728     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1729
1730     # Generate csv tables:
1731     csv_file = f"{table[u'output-file']}.csv"
1732     with open(csv_file, u"wt") as file_handler:
1733         file_handler.write(header_str)
1734         for test in tbl_lst:
1735             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1736
1737     convert_csv_to_pretty_txt(
1738         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1739     )
1740     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1741         txt_file.write(legend)
1742
1743     # Generate html table:
1744     _tpc_generate_html_table(
1745         header,
1746         tbl_lst,
1747         table[u'output-file'],
1748         legend=legend,
1749         title=table.get(u"title", u"")
1750     )
1751
1752
1753 def table_perf_trending_dash(table, input_data):
1754     """Generate the table(s) with algorithm:
1755     table_perf_trending_dash
1756     specified in the specification file.
1757
1758     :param table: Table to generate.
1759     :param input_data: Data to process.
1760     :type table: pandas.Series
1761     :type input_data: InputData
1762     """
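    # The resulting dashboard lists, per test, the latest trend value [Mpps],
    # the short-term and long-term relative changes [%], and the numbers of
    # regressions and progressions detected within the short-term window.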
1763
1764     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1765
1766     # Transform the data
1767     logging.info(
1768         f"    Creating the data set for the {table.get(u'type', u'')} "
1769         f"{table.get(u'title', u'')}."
1770     )
1771     data = input_data.filter_data(table, continue_on_error=True)
1772
1773     # Prepare the header of the tables
1774     header = [
1775         u"Test Case",
1776         u"Trend [Mpps]",
1777         u"Short-Term Change [%]",
1778         u"Long-Term Change [%]",
1779         u"Regressions [#]",
1780         u"Progressions [#]"
1781     ]
1782     header_str = u",".join(header) + u"\n"
1783
1784     # Prepare data for the table:
1785     tbl_dict = dict()
1786     for job, builds in table[u"data"].items():
1787         for build in builds:
1788             for tst_name, tst_data in data[job][str(build)].items():
1789                 if tst_name.lower() in table.get(u"ignore-list", list()):
1790                     continue
1791                 if tbl_dict.get(tst_name, None) is None:
1792                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1793                     if not groups:
1794                         continue
1795                     nic = groups.group(0)
1796                     tbl_dict[tst_name] = {
1797                         u"name": f"{nic}-{tst_data[u'name']}",
1798                         u"data": OrderedDict()
1799                     }
1800                 try:
1801                     tbl_dict[tst_name][u"data"][str(build)] = \
1802                         tst_data[u"result"][u"receive-rate"]
1803                 except (TypeError, KeyError):
1804                     pass  # No data in output.xml for this test
1805
1806     tbl_lst = list()
1807     for tst_name in tbl_dict:
1808         data_t = tbl_dict[tst_name][u"data"]
1809         if len(data_t) < 2:
1810             continue
1811
1812         classification_lst, avgs = classify_anomalies(data_t)
1813
1814         win_size = min(len(data_t), table[u"window"])
1815         long_win_size = min(len(data_t), table[u"long-trend-window"])
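        # Short-term change compares the last trend average with the average
        # win_size samples ago; long-term change compares it with the maximum
        # trend average within the long window, excluding the short-term
        # window. Both are expressed as percentages below.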
1816
1817         try:
1818             max_long_avg = max(
1819                 [x for x in avgs[-long_win_size:-win_size]
1820                  if not isnan(x)])
1821         except ValueError:
1822             max_long_avg = nan
1823         last_avg = avgs[-1]
1824         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1825
1826         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1827             rel_change_last = nan
1828         else:
1829             rel_change_last = round(
1830                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1831
1832         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1833             rel_change_long = nan
1834         else:
1835             rel_change_long = round(
1836                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1837
1838         if classification_lst:
1839             if isnan(rel_change_last) and isnan(rel_change_long):
1840                 continue
1841             if isnan(last_avg) or isnan(rel_change_last) or \
1842                     isnan(rel_change_long):
1843                 continue
1844             tbl_lst.append(
1845                 [tbl_dict[tst_name][u"name"],
1846                  round(last_avg / 1e6, 2),
1847                  rel_change_last,
1848                  rel_change_long,
1849                  classification_lst[-win_size:].count(u"regression"),
1850                  classification_lst[-win_size:].count(u"progression")])
1851
1852     tbl_lst.sort(key=lambda rel: rel[0])
1853
1854     tbl_sorted = list()
1855     for nrr in range(table[u"window"], -1, -1):
1856         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1857         for nrp in range(table[u"window"], -1, -1):
1858             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1859             tbl_out.sort(key=lambda rel: rel[2])
1860             tbl_sorted.extend(tbl_out)
1861
1862     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1863
1864     logging.info(f"    Writing file: {file_name}")
1865     with open(file_name, u"wt") as file_handler:
1866         file_handler.write(header_str)
1867         for test in tbl_sorted:
1868             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1869
1870     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1871     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1872
1873
1874 def _generate_url(testbed, test_name):
1875     """Generate URL to a trending plot from the name of the test case.
1876
1877     :param testbed: The testbed used for testing.
1878     :param test_name: The name of the test case.
1879     :type testbed: str
1880     :type test_name: str
1881     :returns: The URL to the plot with the trending data for the given test
1882         case.
1883     :rtype: str
1884     """
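    # Illustrative example (hypothetical test name): for testbed "2n-skx" and
    # test_name "x710-64b-2t1c-eth-l2bdbasemaclrn-ndrpdr" the rules below
    # produce "l2-2n-skx-x710.html#64b-2t1c-base-dpdk".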
1885
1886     if u"x520" in test_name:
1887         nic = u"x520"
1888     elif u"x710" in test_name:
1889         nic = u"x710"
1890     elif u"xl710" in test_name:
1891         nic = u"xl710"
1892     elif u"xxv710" in test_name:
1893         nic = u"xxv710"
1894     elif u"vic1227" in test_name:
1895         nic = u"vic1227"
1896     elif u"vic1385" in test_name:
1897         nic = u"vic1385"
1898     elif u"x553" in test_name:
1899         nic = u"x553"
1900     else:
1901         nic = u""
1902
1903     if u"64b" in test_name:
1904         frame_size = u"64b"
1905     elif u"78b" in test_name:
1906         frame_size = u"78b"
1907     elif u"imix" in test_name:
1908         frame_size = u"imix"
1909     elif u"9000b" in test_name:
1910         frame_size = u"9000b"
1911     elif u"1518b" in test_name:
1912         frame_size = u"1518b"
1913     elif u"114b" in test_name:
1914         frame_size = u"114b"
1915     else:
1916         frame_size = u""
1917
1918     if u"1t1c" in test_name or \
1919         (u"-1c-" in test_name and
1920          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1921         cores = u"1t1c"
1922     elif u"2t2c" in test_name or \
1923          (u"-2c-" in test_name and
1924           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1925         cores = u"2t2c"
1926     elif u"4t4c" in test_name or \
1927          (u"-4c-" in test_name and
1928           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1929         cores = u"4t4c"
1930     elif u"2t1c" in test_name or \
1931          (u"-1c-" in test_name and
1932           testbed in (u"2n-skx", u"3n-skx")):
1933         cores = u"2t1c"
1934     elif u"4t2c" in test_name:
1935         cores = u"4t2c"
1936     elif u"8t4c" in test_name:
1937         cores = u"8t4c"
1938     else:
1939         cores = u""
1940
1941     if u"testpmd" in test_name:
1942         driver = u"testpmd"
1943     elif u"l3fwd" in test_name:
1944         driver = u"l3fwd"
1945     elif u"avf" in test_name:
1946         driver = u"avf"
1947     elif u"dnv" in testbed or u"tsh" in testbed:
1948         driver = u"ixgbe"
1949     else:
1950         driver = u"dpdk"
1951
1952     if u"acl" in test_name or \
1953             u"macip" in test_name or \
1954             u"nat" in test_name or \
1955             u"policer" in test_name or \
1956             u"cop" in test_name:
1957         bsf = u"features"
1958     elif u"scale" in test_name:
1959         bsf = u"scale"
1960     elif u"base" in test_name:
1961         bsf = u"base"
1962     else:
1963         bsf = u"base"
1964
1965     if u"114b" in test_name and u"vhost" in test_name:
1966         domain = u"vts"
1967     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1968         domain = u"dpdk"
1969     elif u"memif" in test_name:
1970         domain = u"container_memif"
1971     elif u"srv6" in test_name:
1972         domain = u"srv6"
1973     elif u"vhost" in test_name:
1974         domain = u"vhost"
1975         if u"vppl2xc" in test_name:
1976             driver += u"-vpp"
1977         else:
1978             driver += u"-testpmd"
1979         if u"lbvpplacp" in test_name:
1980             bsf += u"-link-bonding"
1981     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1982         domain = u"nf_service_density_vnfc"
1983     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1984         domain = u"nf_service_density_cnfc"
1985     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1986         domain = u"nf_service_density_cnfp"
1987     elif u"ipsec" in test_name:
1988         domain = u"ipsec"
1989         if u"sw" in test_name:
1990             bsf += u"-sw"
1991         elif u"hw" in test_name:
1992             bsf += u"-hw"
1993     elif u"ethip4vxlan" in test_name:
1994         domain = u"ip4_tunnels"
1995     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1996         domain = u"ip4"
1997     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1998         domain = u"ip6"
1999     elif u"l2xcbase" in test_name or \
2000             u"l2xcscale" in test_name or \
2001             u"l2bdbasemaclrn" in test_name or \
2002             u"l2bdscale" in test_name or \
2003             u"l2patch" in test_name:
2004         domain = u"l2"
2005     else:
2006         domain = u""
2007
2008     file_name = u"-".join((domain, testbed, nic)) + u".html#"
2009     anchor_name = u"-".join((frame_size, cores, bsf, driver))
2010
2011     return file_name + anchor_name
2012
2013
2014 def table_perf_trending_dash_html(table, input_data):
2015     """Generate the table(s) with algorithm:
2016     table_perf_trending_dash_html specified in the specification
2017     file.
2018
2019     :param table: Table to generate.
2020     :param input_data: Data to process.
2021     :type table: dict
2022     :type input_data: InputData
2023     """
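    # The input CSV is assumed to be the dashboard written by
    # table_perf_trending_dash: column 4 holds the regression count and
    # column 5 the progression count, which drive the row colouring below.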
2024
2025     _ = input_data
2026
2027     if not table.get(u"testbed", None):
2028         logging.error(
2029             f"The testbed is not defined for the table "
2030             f"{table.get(u'title', u'')}."
2031         )
2032         return
2033
2034     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2035
2036     try:
2037         with open(table[u"input-file"], u'rt') as csv_file:
2038             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2039     except KeyError:
2040         logging.warning(u"The input file is not defined.")
2041         return
2042     except csv.Error as err:
2043         logging.warning(
2044             f"Unable to process the file {table[u'input-file']}.\n"
2045             f"{repr(err)}"
2046         )
2047         return
2048
2049     # Table:
2050     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2051
2052     # Table header:
2053     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2054     for idx, item in enumerate(csv_lst[0]):
2055         alignment = u"left" if idx == 0 else u"center"
2056         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2057         thead.text = item
2058
2059     # Rows:
2060     colors = {
2061         u"regression": (
2062             u"#ffcccc",
2063             u"#ff9999"
2064         ),
2065         u"progression": (
2066             u"#c6ecc6",
2067             u"#9fdf9f"
2068         ),
2069         u"normal": (
2070             u"#e9f1fb",
2071             u"#d4e4f7"
2072         )
2073     }
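    # Each state maps to two shades; consecutive rows alternate between them
    # (r_idx % 2) so neighbouring rows of the same state stay distinguishable.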
2074     for r_idx, row in enumerate(csv_lst[1:]):
2075         if int(row[4]):
2076             color = u"regression"
2077         elif int(row[5]):
2078             color = u"progression"
2079         else:
2080             color = u"normal"
2081         trow = ET.SubElement(
2082             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2083         )
2084
2085         # Columns:
2086         for c_idx, item in enumerate(row):
2087             tdata = ET.SubElement(
2088                 trow,
2089                 u"td",
2090                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2091             )
2092             # Name:
2093             if c_idx == 0:
2094                 ref = ET.SubElement(
2095                     tdata,
2096                     u"a",
2097                     attrib=dict(
2098                         href=f"../trending/"
2099                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2100                     )
2101                 )
2102                 ref.text = item
2103             else:
2104                 tdata.text = item
2105     try:
2106         with open(table[u"output-file"], u'w') as html_file:
2107             logging.info(f"    Writing file: {table[u'output-file']}")
2108             html_file.write(u".. raw:: html\n\n\t")
2109             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2110             html_file.write(u"\n\t<p><br><br></p>\n")
2111     except KeyError:
2112         logging.warning(u"The output file is not defined.")
2113         return
2114
2115
2116 def table_last_failed_tests(table, input_data):
2117     """Generate the table(s) with algorithm: table_last_failed_tests
2118     specified in the specification file.
2119
2120     :param table: Table to generate.
2121     :param input_data: Data to process.
2122     :type table: pandas.Series
2123     :type input_data: InputData
2124     """
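    # For every processed build the output file lists: the build number, the
    # VPP version, the numbers of passed and failed tests, and the names of
    # the failed tests (prefixed with the detected NIC).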
2125
2126     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2127
2128     # Transform the data
2129     logging.info(
2130         f"    Creating the data set for the {table.get(u'type', u'')} "
2131         f"{table.get(u'title', u'')}."
2132     )
2133
2134     data = input_data.filter_data(table, continue_on_error=True)
2135
2136     if data is None or data.empty:
2137         logging.warning(
2138             f"    No data for the {table.get(u'type', u'')} "
2139             f"{table.get(u'title', u'')}."
2140         )
2141         return
2142
2143     tbl_list = list()
2144     for job, builds in table[u"data"].items():
2145         for build in builds:
2146             build = str(build)
2147             try:
2148                 version = input_data.metadata(job, build).get(u"version", u"")
2149             except KeyError:
2150                 logging.error(f"Data for {job}: {build} is not present.")
2151                 return
2152             tbl_list.append(build)
2153             tbl_list.append(version)
2154             failed_tests = list()
2155             passed = 0
2156             failed = 0
2157             for tst_data in data[job][build].values:
2158                 if tst_data[u"status"] != u"FAIL":
2159                     passed += 1
2160                     continue
2161                 failed += 1
2162                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2163                 if not groups:
2164                     continue
2165                 nic = groups.group(0)
2166                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2167             tbl_list.append(str(passed))
2168             tbl_list.append(str(failed))
2169             tbl_list.extend(failed_tests)
2170
2171     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2172     logging.info(f"    Writing file: {file_name}")
2173     with open(file_name, u"wt") as file_handler:
2174         for test in tbl_list:
2175             file_handler.write(test + u'\n')
2176
2177
2178 def table_failed_tests(table, input_data):
2179     """Generate the table(s) with algorithm: table_failed_tests
2180     specified in the specification file.
2181
2182     :param table: Table to generate.
2183     :param input_data: Data to process.
2184     :type table: pandas.Series
2185     :type input_data: InputData
2186     """
2187
2188     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2189
2190     # Transform the data
2191     logging.info(
2192         f"    Creating the data set for the {table.get(u'type', u'')} "
2193         f"{table.get(u'title', u'')}."
2194     )
2195     data = input_data.filter_data(table, continue_on_error=True)
2196
2197     # Prepare the header of the tables
2198     header = [
2199         u"Test Case",
2200         u"Failures [#]",
2201         u"Last Failure [Time]",
2202         u"Last Failure [VPP-Build-Id]",
2203         u"Last Failure [CSIT-Job-Build-Id]"
2204     ]
2205
2206     # Generate the data for the table according to the model in the table
2207     # specification
2208
2209     now = dt.utcnow()
2210     timeperiod = timedelta(int(table.get(u"window", 7)))
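    # Only builds generated within the last "window" days (7 by default)
    # contribute to the failure statistics collected below.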
2211
2212     tbl_dict = dict()
2213     for job, builds in table[u"data"].items():
2214         for build in builds:
2215             build = str(build)
2216             for tst_name, tst_data in data[job][build].items():
2217                 if tst_name.lower() in table.get(u"ignore-list", list()):
2218                     continue
2219                 if tbl_dict.get(tst_name, None) is None:
2220                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2221                     if not groups:
2222                         continue
2223                     nic = groups.group(0)
2224                     tbl_dict[tst_name] = {
2225                         u"name": f"{nic}-{tst_data[u'name']}",
2226                         u"data": OrderedDict()
2227                     }
2228                 try:
2229                     generated = input_data.metadata(job, build).\
2230                         get(u"generated", u"")
2231                     if not generated:
2232                         continue
2233                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2234                     if (now - then) <= timeperiod:
2235                         tbl_dict[tst_name][u"data"][build] = (
2236                             tst_data[u"status"],
2237                             generated,
2238                             input_data.metadata(job, build).get(u"version",
2239                                                                 u""),
2240                             build
2241                         )
2242                 except (TypeError, KeyError) as err:
2243                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
2244
2245     max_fails = 0
2246     tbl_lst = list()
2247     for tst_data in tbl_dict.values():
2248         fails_nr = 0
2249         fails_last_date = u""
2250         fails_last_vpp = u""
2251         fails_last_csit = u""
2252         for val in tst_data[u"data"].values():
2253             if val[0] == u"FAIL":
2254                 fails_nr += 1
2255                 fails_last_date = val[1]
2256                 fails_last_vpp = val[2]
2257                 fails_last_csit = val[3]
2258         if fails_nr:
2259             max_fails = fails_nr if fails_nr > max_fails else max_fails
2260             tbl_lst.append(
2261                 [
2262                     tst_data[u"name"],
2263                     fails_nr,
2264                     fails_last_date,
2265                     fails_last_vpp,
2266                     f"mrr-daily-build-{fails_last_csit}"
2267                 ]
2268             )
2269
2270     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2271     tbl_sorted = list()
2272     for nrf in range(max_fails, -1, -1):
2273         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2274         tbl_sorted.extend(tbl_fails)
2275
2276     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2277     logging.info(f"    Writing file: {file_name}")
2278     with open(file_name, u"wt") as file_handler:
2279         file_handler.write(u",".join(header) + u"\n")
2280         for test in tbl_sorted:
2281             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2282
2283     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2284     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2285
2286
2287 def table_failed_tests_html(table, input_data):
2288     """Generate the table(s) with algorithm: table_failed_tests_html
2289     specified in the specification file.
2290
2291     :param table: Table to generate.
2292     :param input_data: Data to process.
2293     :type table: pandas.Series
2294     :type input_data: InputData
2295     """
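    # Each test-case cell is rendered as a link to its trending graph; the
    # link target is built by _generate_url() from the testbed and test name.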
2296
2297     _ = input_data
2298
2299     if not table.get(u"testbed", None):
2300         logging.error(
2301             f"The testbed is not defined for the table "
2302             f"{table.get(u'title', u'')}."
2303         )
2304         return
2305
2306     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2307
2308     try:
2309         with open(table[u"input-file"], u'rt') as csv_file:
2310             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2311     except KeyError:
2312         logging.warning(u"The input file is not defined.")
2313         return
2314     except csv.Error as err:
2315         logging.warning(
2316             f"Unable to process the file {table[u'input-file']}.\n"
2317             f"{repr(err)}"
2318         )
2319         return
2320
2321     # Table:
2322     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2323
2324     # Table header:
2325     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2326     for idx, item in enumerate(csv_lst[0]):
2327         alignment = u"left" if idx == 0 else u"center"
2328         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2329         thead.text = item
2330
2331     # Rows:
2332     colors = (u"#e9f1fb", u"#d4e4f7")
2333     for r_idx, row in enumerate(csv_lst[1:]):
2334         background = colors[r_idx % 2]
2335         trow = ET.SubElement(
2336             failed_tests, u"tr", attrib=dict(bgcolor=background)
2337         )
2338
2339         # Columns:
2340         for c_idx, item in enumerate(row):
2341             tdata = ET.SubElement(
2342                 trow,
2343                 u"td",
2344                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2345             )
2346             # Name:
2347             if c_idx == 0:
2348                 ref = ET.SubElement(
2349                     tdata,
2350                     u"a",
2351                     attrib=dict(
2352                         href=f"../trending/"
2353                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2354                     )
2355                 )
2356                 ref.text = item
2357             else:
2358                 tdata.text = item
2359     try:
2360         with open(table[u"output-file"], u'w') as html_file:
2361             logging.info(f"    Writing file: {table[u'output-file']}")
2362             html_file.write(u".. raw:: html\n\n\t")
2363             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2364             html_file.write(u"\n\t<p><br><br></p>\n")
2365     except KeyError:
2366         logging.warning(u"The output file is not defined.")
2367         return
2368
2369
2370 def table_comparison(table, input_data):
2371     """Generate the table(s) with algorithm: table_comparison
2372     specified in the specification file.
2373
2374     :param table: Table to generate.
2375     :param input_data: Data to process.
2376     :type table: pandas.Series
2377     :type input_data: InputData
2378     """
2379     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2380
2381     # Transform the data
2382     logging.info(
2383         f"    Creating the data set for the {table.get(u'type', u'')} "
2384         f"{table.get(u'title', u'')}."
2385     )
2386
2387     columns = table.get(u"columns", None)
2388     if not columns:
2389         logging.error(
2390             f"No columns specified for {table.get(u'title', u'')}. Skipping."
2391         )
2392         return
2393
2394     cols = list()
2395     for idx, col in enumerate(columns):
2396         if col.get(u"data-set", None) is None:
2397             logging.warning(f"No data for column {col.get(u'title', u'')}")
2398             continue
2399         data = input_data.filter_data(
2400             table,
2401             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2402             data=col[u"data-set"],
2403             continue_on_error=True
2404         )
2405         col_data = {
2406             u"title": col.get(u"title", f"Column{idx}"),
2407             u"data": dict()
2408         }
2409         for builds in data.values:
2410             for build in builds:
2411                 for tst_name, tst_data in build.items():
2412                     tst_name_mod = \
2413                         _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2414                     if col_data[u"data"].get(tst_name_mod, None) is None:
2415                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
2416                         if u"across testbeds" in table[u"title"].lower() or \
2417                                 u"across topologies" in table[u"title"].lower():
2418                             name = _tpc_modify_displayed_test_name(name)
2419                         col_data[u"data"][tst_name_mod] = {
2420                             u"name": name,
2421                             u"replace": True,
2422                             u"data": list(),
2423                             u"mean": None,
2424                             u"stdev": None
2425                         }
2426                     _tpc_insert_data(
2427                         target=col_data[u"data"][tst_name_mod][u"data"],
2428                         src=tst_data,
2429                         include_tests=table[u"include-tests"]
2430                     )
2431
2432         replacement = col.get(u"data-replacement", None)
2433         if replacement:
2434             rpl_data = input_data.filter_data(
2435                 table,
2436                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2437                 data=replacement,
2438                 continue_on_error=True
2439             )
2440             for builds in rpl_data.values:
2441                 for build in builds:
2442                     for tst_name, tst_data in build.items():
2443                         tst_name_mod = \
2444                             _tpc_modify_test_name(tst_name).\
2445                             replace(u"2n1l-", u"")
2446                         if col_data[u"data"].get(tst_name_mod, None) is None:
2447                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
2448                             if u"across testbeds" in table[u"title"].lower() \
2449                                     or u"across topologies" in \
2450                                     table[u"title"].lower():
2451                                 name = _tpc_modify_displayed_test_name(name)
2452                             col_data[u"data"][tst_name_mod] = {
2453                                 u"name": name,
2454                                 u"replace": False,
2455                                 u"data": list(),
2456                                 u"mean": None,
2457                                 u"stdev": None
2458                             }
2459                         if col_data[u"data"][tst_name_mod][u"replace"]:
2460                             col_data[u"data"][tst_name_mod][u"replace"] = False
2461                             col_data[u"data"][tst_name_mod][u"data"] = list()
2462                         _tpc_insert_data(
2463                             target=col_data[u"data"][tst_name_mod][u"data"],
2464                             src=tst_data,
2465                             include_tests=table[u"include-tests"]
2466                         )
2467
2468         if table[u"include-tests"] in (u"NDR", u"PDR"):
2469             for tst_name, tst_data in col_data[u"data"].items():
2470                 if tst_data[u"data"]:
2471                     tst_data[u"mean"] = mean(tst_data[u"data"])
2472                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
2473         elif table[u"include-tests"] in (u"MRR", ):
2474             for tst_name, tst_data in col_data[u"data"].items():
2475                 if tst_data[u"data"]:
2476                     tst_data[u"mean"] = tst_data[u"data"][0]
2477                     tst_data[u"stdev"] = tst_data[u"data"][0]
2478
2479         cols.append(col_data)
2480
2481     tbl_dict = dict()
2482     for col in cols:
2483         for tst_name, tst_data in col[u"data"].items():
2484             if tbl_dict.get(tst_name, None) is None:
2485                 tbl_dict[tst_name] = {
2486                     "name": tst_data[u"name"]
2487                 }
2488             tbl_dict[tst_name][col[u"title"]] = {
2489                 u"mean": tst_data[u"mean"],
2490                 u"stdev": tst_data[u"stdev"]
2491             }
2492
2493     tbl_lst = list()
2494     for tst_data in tbl_dict.values():
2495         row = [tst_data[u"name"], ]
2496         for col in cols:
2497             row.append(tst_data.get(col[u"title"], None))
2498         tbl_lst.append(row)
2499
2500     comparisons = table.get(u"comparisons", None)
2501     if comparisons and isinstance(comparisons, list):
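        # Iterate over a copy so that invalid entries can be removed from
        # the original list without skipping the item that follows them.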
2502         for comp in list(comparisons):
2503             try:
2504                 col_ref = int(comp[u"reference"])
2505                 col_cmp = int(comp[u"compare"])
2506             except KeyError:
2507                 logging.warning(u"Comparison: No references defined! Skipping.")
2508                 comparisons.remove(comp)
2509                 continue
2510             if not (0 < col_ref <= len(cols) and
2511                     0 < col_cmp <= len(cols)) or \
2512                     col_ref == col_cmp:
2513                 logging.warning(f"Wrong values of reference={col_ref} "
2514                                 f"and/or compare={col_cmp}. Skipping.")
2515                 comparisons.remove(comp)
2516                 continue
2517
2518     tbl_cmp_lst = list()
2519     if comparisons:
2520         for row in tbl_lst:
2521             new_row = deepcopy(row)
2522             add_to_tbl = False
2523             for comp in comparisons:
2524                 ref_itm = row[int(comp[u"reference"])]
2525                 if ref_itm is None and \
2526                         comp.get(u"reference-alt", None) is not None:
2527                     ref_itm = row[int(comp[u"reference-alt"])]
2528                 cmp_itm = row[int(comp[u"compare"])]
2529                 if ref_itm is not None and cmp_itm is not None and \
2530                         ref_itm[u"mean"] is not None and \
2531                         cmp_itm[u"mean"] is not None and \
2532                         ref_itm[u"stdev"] is not None and \
2533                         cmp_itm[u"stdev"] is not None:
2534                     delta, d_stdev = relative_change_stdev(
2535                         ref_itm[u"mean"], cmp_itm[u"mean"],
2536                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
2537                     )
2538                     new_row.append(
2539                         {
2540                             u"mean": delta * 1e6,
2541                             u"stdev": d_stdev * 1e6
2542                         }
2543                     )
2544                     add_to_tbl = True
2545                 else:
2546                     new_row.append(None)
2547             if add_to_tbl:
2548                 tbl_cmp_lst.append(new_row)
2549
2550     tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2551     tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
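    # The two stable sorts above order the rows alphabetically by test name
    # and then by the mean of the last comparison column (descending), so
    # rows with equal means keep their alphabetical order.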
2552
2553     rcas = list()
2554     rca_in = table.get(u"rca", None)
2555     if rca_in and isinstance(rca_in, list):
2556         for idx, itm in enumerate(rca_in):
2557             try:
2558                 with open(itm.get(u"data", u""), u"r") as rca_file:
2559                     rcas.append(
2560                         {
2561                             u"title": itm.get(u"title", f"RCA{idx}"),
2562                             u"data": load(rca_file, Loader=FullLoader)
2563                         }
2564                     )
2565             except (YAMLError, IOError) as err:
2566                 logging.warning(
2567                     f"The RCA file {itm.get(u'data', u'')} does not exist "
2568                     f"or is corrupted!"
2569                 )
2570                 logging.debug(repr(err))
2571
2572     tbl_for_csv = list()
2573     for line in tbl_cmp_lst:
2574         row = [line[0], ]
2575         for idx, itm in enumerate(line[1:]):
2576             if itm is None:
2577                 row.append(u"NT")
2578                 row.append(u"NT")
2579             else:
2580                 row.append(round(float(itm[u'mean']) / 1e6, 3))
2581                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2582         for rca in rcas:
2583             rca_nr = rca[u"data"].get(row[0], u"-")
2584             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2585         tbl_for_csv.append(row)
2586
2587     header_csv = [u"Test Case", ]
2588     for col in cols:
2589         header_csv.append(f"Avg({col[u'title']})")
2590         header_csv.append(f"Stdev({col[u'title']})")
2591     for comp in comparisons:
2592         header_csv.append(
2593             f"Avg({comp.get(u'title', u'')})"
2594         )
2595         header_csv.append(
2596             f"Stdev({comp.get(u'title', u'')})"
2597         )
2598     header_csv.extend([rca[u"title"] for rca in rcas])
2599
2600     legend_lst = table.get(u"legend", None)
2601     if legend_lst is None:
2602         legend = u""
2603     else:
2604         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
2605
2606     footnote = u""
2607     for rca in rcas:
2608         footnote += f"\n{rca[u'title']}:\n"
2609         footnote += rca[u"data"].get(u"footnote", u"")
2610
2611     csv_file = f"{table[u'output-file']}-csv.csv"
2612     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2613         file_handler.write(
2614             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
2615         )
2616         for test in tbl_for_csv:
2617             file_handler.write(
2618                 u",".join([f'"{item}"' for item in test]) + u"\n"
2619             )
2620         if legend_lst:
2621             for item in legend_lst:
2622                 file_handler.write(f'"{item}"\n')
2623         if footnote:
2624             for itm in footnote.split(u"\n"):
2625                 file_handler.write(f'"{itm}"\n')
2626
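    # First pass: format each cell as "mean ±stdev" and record the widest
    # "±stdev" part per column; the second pass below pads that part so the
    # columns line up.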
2627     tbl_tmp = list()
2628     max_lens = [0, ] * len(tbl_cmp_lst[0])
2629     for line in tbl_cmp_lst:
2630         row = [line[0], ]
2631         for idx, itm in enumerate(line[1:]):
2632             if itm is None:
2633                 new_itm = u"NT"
2634             else:
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}"
                    ).replace(u"nan", u"NaN")
                else:
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}"
                    ).replace(u"nan", u"NaN")
2647             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2648                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2649             row.append(new_itm)
2650
2651         tbl_tmp.append(row)
2652
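    # Pad the ±<stdev> parts to the recorded column widths and append the RCA
    # reference columns.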
2653     tbl_final = list()
2654     for line in tbl_tmp:
2655         row = [line[0], ]
2656         for idx, itm in enumerate(line[1:]):
2657             if itm in (u"NT", u"NaN"):
2658                 row.append(itm)
2659                 continue
2660             itm_lst = itm.rsplit(u"\u00B1", 1)
2661             itm_lst[-1] = \
2662                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2663             row.append(u"\u00B1".join(itm_lst))
2664         for rca in rcas:
2665             rca_nr = rca[u"data"].get(row[0], u"-")
2666             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2667
2668         tbl_final.append(row)
2669
2670     header = [u"Test Case", ]
2671     header.extend([col[u"title"] for col in cols])
2672     header.extend([comp.get(u"title", u"") for comp in comparisons])
2673     header.extend([rca[u"title"] for rca in rcas])
2674
2675     # Generate csv tables:
2676     csv_file = f"{table[u'output-file']}.csv"
2677     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2678         file_handler.write(u";".join(header) + u"\n")
2679         for test in tbl_final:
2680             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2681
2682     # Generate txt table:
2683     txt_file_name = f"{table[u'output-file']}.txt"
2684     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
2685
2686     with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2687         txt_file.write(legend)
2688         if footnote:
2689             txt_file.write(footnote)
2690         txt_file.write(u"\n:END")
2691
2692     # Generate html table:
2693     _tpc_generate_html_table(
2694         header,
2695         tbl_final,
2696         table[u'output-file'],
2697         legend=legend,
2698         footnote=footnote,
2699         sort_data=False,
2700         title=table.get(u"title", u"")
2701     )
2702
2703
2704 def table_weekly_comparison(table, in_data):
2705     """Generate the table(s) with algorithm: table_weekly_comparison
2706     specified in the specification file.
2707
2708     :param table: Table to generate.
2709     :param in_data: Data to process.
2710     :type table: pandas.Series
2711     :type in_data: InputData
2712     """
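    # Specification keys consumed by this function; the values below are
    # illustrative only, the real entries live in the specification YAML
    # files:
    #
    #   include-tests: NDR                # or PDR
    #   nr-of-data-columns: 4             # number of weekly data columns
    #   output-file: weekly_comparison    # hypothetical output path
    #   testbeds:                         # testbed identifier -> name
    #     10.0.0.1: testbed1              # hypothetical entry
    #   comparisons:
    #   - reference: -1                   # negative keys of the data columns
    #     compare: -4                     # collected below (-1 .. -nr_cols)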
2713     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2714
2715     # Transform the data
2716     logging.info(
2717         f"    Creating the data set for the {table.get(u'type', u'')} "
2718         f"{table.get(u'title', u'')}."
2719     )
2720
2721     incl_tests = table.get(u"include-tests", None)
2722     if incl_tests not in (u"NDR", u"PDR"):
        logging.error(
            f"Wrong include-tests value ({incl_tests}), expected NDR or PDR."
        )
2724         return
2725
2726     nr_cols = table.get(u"nr-of-data-columns", None)
2727     if not nr_cols or nr_cols < 2:
2728         logging.error(
            f"Wrong number of data columns specified for "
            f"{table.get(u'title', u'')}, at least two are needed. Skipping."
2730         )
2731         return
2732
2733     data = in_data.filter_data(
2734         table,
2735         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2736         continue_on_error=True
2737     )
2738
2739     header = [
        [u"Version", ],
2741         [u"Date", ],
2742         [u"Build", ],
2743         [u"Testbed", ]
2744     ]
2745     tbl_dict = dict()
2746     idx = 0
2747     tb_tbl = table.get(u"testbeds", None)
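    # Each usable build becomes one data column: its version, date, build
    # number and testbed name are insert()-ed at position 1 of the header
    # rows, so the most recently processed build ends up in the left-most
    # data column.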
2748     for job_name, job_data in data.items():
2749         for build_nr, build in job_data.items():
2750             if idx >= nr_cols:
2751                 break
2752             if build.empty:
2753                 continue
2754
2755             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2756             if tb_ip and tb_tbl:
2757                 testbed = tb_tbl.get(tb_ip, u"")
2758             else:
2759                 testbed = u""
2760             header[2].insert(1, build_nr)
2761             header[3].insert(1, testbed)
2762             header[1].insert(
2763                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2764             )
2765             header[0].insert(
2766                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2767             )
2768
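            # Store the NDR/PDR lower-bound throughput of every test under
            # this build's column key (-idx - 1); tests without the requested
            # result are silently skipped.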
2769             for tst_name, tst_data in build.items():
2770                 tst_name_mod = \
2771                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2772                 if not tbl_dict.get(tst_name_mod, None):
2773                     tbl_dict[tst_name_mod] = dict(
2774                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2775                     )
2776                 try:
2777                     tbl_dict[tst_name_mod][-idx - 1] = \
2778                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2779                 except (TypeError, IndexError, KeyError, ValueError):
2780                     pass
2781             idx += 1
2782
2783     if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping.")
2785         return
2786
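    # For every comparison in the specification compute the relative change
    # between the referenced and compared columns per test (NaN when either
    # value is missing) and extend the header with a "Diff<n>" label and the
    # two versions being compared.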
2787     cmp_dict = dict()
2788     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2789         idx_ref = cmp.get(u"reference", None)
2790         idx_cmp = cmp.get(u"compare", None)
2791         if idx_ref is None or idx_cmp is None:
2792             continue
2793         header[0].append(f"Diff{idx + 1}")
2794         header[1].append(header[0][idx_ref - idx - 1])
2795         header[2].append(u"vs")
2796         header[3].append(header[0][idx_cmp - idx - 1])
2797         for tst_name, tst_data in tbl_dict.items():
2798             if not cmp_dict.get(tst_name, None):
2799                 cmp_dict[tst_name] = list()
2800             ref_data = tst_data.get(idx_ref, None)
2801             cmp_data = tst_data.get(idx_cmp, None)
2802             if ref_data is None or cmp_data is None:
2803                 cmp_dict[tst_name].append(float('nan'))
2804             else:
2805                 cmp_dict[tst_name].append(
2806                     relative_change(ref_data, cmp_data)
2807                 )
2808
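    # Assemble the final rows: test name, the throughput columns divided by
    # 1e6 and rounded to one decimal place, then the relative differences.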
2809     tbl_lst = list()
2810     for tst_name, tst_data in tbl_dict.items():
2811         itm_lst = [tst_data[u"name"], ]
2812         for idx in range(nr_cols):
2813             item = tst_data.get(-idx - 1, None)
2814             if item is None:
2815                 itm_lst.insert(1, None)
2816             else:
2817                 itm_lst.insert(1, round(item / 1e6, 1))
2818         itm_lst.extend(
2819             [
2820                 None if itm is None else round(itm, 1)
2821                 for itm in cmp_dict[tst_name]
2822             ]
2823         )
2824         tbl_lst.append(itm_lst)
2825
2826     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2827     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
2828
2829     # Generate csv table:
2830     csv_file = f"{table[u'output-file']}.csv"
2831     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2832         for hdr in header:
2833             file_handler.write(u",".join(hdr) + u"\n")
2834         for test in tbl_lst:
2835             file_handler.write(u",".join(
2836                 [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-")
                    .replace(u"null", u"-") for item in test
2839                 ]
2840             ) + u"\n")
2841
2842     txt_file = f"{table[u'output-file']}.txt"
2843     convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")
2844
2845     # Reorganize header in txt table
2846     txt_table = list()
2847     with open(txt_file, u"rt", encoding='utf-8') as file_handler:
2848         for line in file_handler:
2849             txt_table.append(line)
2850     try:
2851         txt_table.insert(5, txt_table.pop(2))
2852         with open(txt_file, u"wt", encoding='utf-8') as file_handler:
2853             file_handler.writelines(txt_table)
2854     except IndexError:
2855         pass
2856
2857     # Generate html table:
2858     hdr_html = [
2859         u"<br>".join(row) for row in zip(*header)
2860     ]
2861     _tpc_generate_html_table(
2862         hdr_html,
2863         tbl_lst,
2864         table[u'output-file'],
2865         sort_data=True,
2866         title=table.get(u"title", u""),
2867         generate_rst=False
2868     )