Trending: Update graphs - 2n-clx
[csit.git] / resources / tools / presentation / generator_tables.py
index 0d52ce1..e530942 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -29,8 +29,9 @@ import plotly.offline as ploff
 import pandas as pd
 
 from numpy import nan, isnan
+from yaml import load, FullLoader, YAMLError
 
-from pal_utils import mean, stdev, relative_change, classify_anomalies, \
+from pal_utils import mean, stdev, classify_anomalies, \
     convert_csv_to_pretty_txt, relative_change_stdev
 
 
@@ -98,10 +99,10 @@ def table_oper_data_html(table, input_data):
     data = input_data.merge_data(data)
 
     sort_tests = table.get(u"sort", None)
-    if sort_tests and sort_tests in (u"ascending", u"descending"):
+    if sort_tests:
         args = dict(
             inplace=True,
-            ascending=True if sort_tests == u"ascending" else False
+            ascending=(sort_tests == u"ascending")
         )
         data.sort_index(**args)
 
@@ -151,6 +152,17 @@ def table_oper_data_html(table, input_data):
                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
             )
             tcol.text = u"No Data"
+
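+            # Append a spacer row: a single white dot on the "empty"
+            # background colour, effectively invisible padding.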
+            trow = ET.SubElement(
+                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
+            )
+            thead = ET.SubElement(
+                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+            )
+            font = ET.SubElement(
+                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
+            )
+            font.text = u"."
             return str(ET.tostring(tbl, encoding=u"unicode"))
 
         tbl_hdr = (
@@ -162,7 +174,7 @@ def table_oper_data_html(table, input_data):
             u"Average Vector Size"
         )
 
-        for dut_name, dut_data in tst_data[u"show-run"].items():
+        for dut_data in tst_data[u"show-run"].values():
             trow = ET.SubElement(
                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
             )
@@ -172,15 +184,7 @@ def table_oper_data_html(table, input_data):
             if dut_data.get(u"threads", None) is None:
                 tcol.text = u"No Data"
                 continue
-            bold = ET.SubElement(tcol, u"b")
-            bold.text = dut_name
 
-            trow = ET.SubElement(
-                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
-            )
-            tcol = ET.SubElement(
-                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
-            )
             bold = ET.SubElement(tcol, u"b")
             bold.text = (
                 f"Host IP: {dut_data.get(u'host', '')}, "
@@ -261,7 +265,7 @@ def table_oper_data_html(table, input_data):
         if not html_table:
             continue
         try:
-            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
+            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
             with open(f"{file_name}", u'w') as html_file:
                 logging.info(f"    Writing file: {file_name}")
                 html_file.write(u".. raw:: html\n\n\t")
@@ -284,6 +288,7 @@ def table_merged_details(table, input_data):
     """
 
     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
     # Transform the data
     logging.info(
         f"    Creating the data set for the {table.get(u'type', u'')} "
@@ -293,10 +298,10 @@ def table_merged_details(table, input_data):
     data = input_data.merge_data(data)
 
     sort_tests = table.get(u"sort", None)
-    if sort_tests and sort_tests in (u"ascending", u"descending"):
+    if sort_tests:
         args = dict(
             inplace=True,
-            ascending=True if sort_tests == u"ascending" else False
+            ascending=(sort_tests == u"ascending")
         )
         data.sort_index(**args)
 
@@ -323,6 +328,9 @@ def table_merged_details(table, input_data):
                 try:
                     col_data = str(data[test][column[
                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
+                    # Do not include tests with "Test Failed" in the test
+                    # message
+                    if u"Test Failed" in col_data:
+                        continue
                     col_data = col_data.replace(
                         u"No Data", u"Not Captured     "
                     )
@@ -335,19 +343,27 @@ def table_merged_details(table, input_data):
                                        f"{u'-'.join(col_data_lst[half:])}"
                         col_data = f" |prein| {col_data} |preout| "
                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
+                        # Temporary solution: remove NDR results from the
+                        # message:
+                        if bool(table.get(u'remove-ndr', False)):
+                            try:
+                                col_data = col_data.split(u" |br| ", 1)[1]
+                            except IndexError:
+                                pass
                         col_data = f" |prein| {col_data} |preout| "
                     elif column[u"data"].split(u" ")[1] in \
-                        (u"conf-history", u"show-run"):
+                            (u"conf-history", u"show-run"):
                         col_data = col_data.replace(u" |br| ", u"", 1)
                         col_data = f" |prein| {col_data[:-5]} |preout| "
                     row_lst.append(f'"{col_data}"')
                 except KeyError:
                     row_lst.append(u'"Not captured"')
-            table_lst.append(row_lst)
+            if len(row_lst) == len(table[u"columns"]):
+                table_lst.append(row_lst)
 
         # Write the data to file
         if table_lst:
-            file_name = f"{table[u'output-file']}_{suite_name}.csv"
+            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
+            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
             logging.info(f"      Writing file: {file_name}")
             with open(file_name, u"wt") as file_handler:
                 file_handler.write(u",".join(header) + u"\n")
@@ -411,7 +427,12 @@ def _tpc_insert_data(target, src, include_tests):
     """
     try:
         if include_tests == u"MRR":
-            target.append(src[u"result"][u"receive-rate"])
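+            # For MRR, keep both the mean receive rate and its stdev.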
+            target.append(
+                (
+                    src[u"result"][u"receive-rate"],
+                    src[u"result"][u"receive-stdev"]
+                )
+            )
         elif include_tests == u"PDR":
             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
         elif include_tests == u"NDR":
@@ -433,7 +454,6 @@ def _tpc_sort_table(table):
     :rtype: list
     """
 
-
     tbl_new = list()
     tbl_see = list()
     tbl_delta = list()
@@ -449,19 +469,22 @@ def _tpc_sort_table(table):
     # Sort the tables:
     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
-    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
-    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
+    tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
+    tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
+    tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
 
     # Put the tables together:
     table = list()
-    table.extend(tbl_new)
+    # We do not want "New in CSIT":
+    # table.extend(tbl_new)
     table.extend(tbl_see)
     table.extend(tbl_delta)
 
     return table
 
 
-def _tpc_generate_html_table(header, data, output_file_name):
+def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
+                             footnote=u""):
     """Generate html table from input data with simple sorting possibility.
 
     :param header: Table header.
@@ -469,21 +492,35 @@ def _tpc_generate_html_table(header, data, output_file_name):
         Inner lists are rows in the table. All inner lists must be of the same
         length. The length of these lists must be the same as the length of the
         header.
-    :param output_file_name: The name (relative or full path) where the
+    :param out_file_name: The name (relative or full path) where the
         generated html table is written.
+    :param legend: The legend to display below the table.
+    :param footnote: The footnote to display below the table (and legend).
     :type header: list
     :type data: list of lists
-    :type output_file_name: str
+    :type out_file_name: str
+    :type legend: str
+    :type footnote: str
     """
 
+    try:
+        idx = header.index(u"Test Case")
+    except ValueError:
+        idx = 0
+    params = {
+        u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
+        u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
+        u"width": ([28, 9], [4, 24, 10])
+    }
+
     df_data = pd.DataFrame(data, columns=header)
 
     df_sorted = [df_data.sort_values(
-        by=[key, header[0]], ascending=[True, True]
-        if key != header[0] else [False, True]) for key in header]
+        by=[key, header[idx]], ascending=[True, True]
+        if key != header[idx] else [False, True]) for key in header]
     df_sorted_rev = [df_data.sort_values(
-        by=[key, header[0]], ascending=[False, True]
-        if key != header[0] else [True, True]) for key in header]
+        by=[key, header[idx]], ascending=[False, True]
+        if key != header[idx] else [True, True]) for key in header]
     df_sorted.extend(df_sorted_rev)
 
     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
@@ -491,7 +528,7 @@ def _tpc_generate_html_table(header, data, output_file_name):
     table_header = dict(
         values=[f"<b>{item}</b>" for item in header],
         fill_color=u"#7eade7",
-        align=[u"left", u"center"]
+        align=params[u"align-hdr"][idx]
     )
 
     fig = go.Figure()
@@ -500,12 +537,12 @@ def _tpc_generate_html_table(header, data, output_file_name):
         columns = [table.get(col) for col in header]
         fig.add_trace(
             go.Table(
-                columnwidth=[30, 10],
+                columnwidth=params[u"width"][idx],
                 header=table_header,
                 cells=dict(
                     values=columns,
                     fill_color=fill_color,
-                    align=[u"left", u"right"]
+                    align=params[u"align-itm"][idx]
                 )
             )
         )
@@ -530,28 +567,46 @@ def _tpc_generate_html_table(header, data, output_file_name):
             go.layout.Updatemenu(
                 type=u"dropdown",
                 direction=u"down",
-                x=0.03,
+                x=0.0,
                 xanchor=u"left",
                 y=1.045,
                 yanchor=u"top",
                 active=len(menu_items) - 1,
                 buttons=list(buttons)
             )
-        ],
-        annotations=[
-            go.layout.Annotation(
-                text=u"<b>Sort by:</b>",
-                x=0,
-                xref=u"paper",
-                y=1.035,
-                yref=u"paper",
-                align=u"left",
-                showarrow=False
-            )
         ]
     )
 
-    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
+    ploff.plot(
+        fig,
+        show_link=False,
+        auto_open=False,
+        filename=f"{out_file_name}_in.html"
+    )
+
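+    # Write an .rst stub that embeds the interactive html table via an
+    # iframe, followed by the legend and footnote.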
+    file_name = out_file_name.split(u"/")[-1]
+    if u"vpp" in out_file_name:
+        path = u"_tmp/src/vpp_performance_tests/comparisons/"
+    else:
+        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
+    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
+        rst_file.write(
+            u"\n"
+            u".. |br| raw:: html\n\n    <br />\n\n\n"
+            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
+            u".. |preout| raw:: html\n\n    </pre>\n\n"
+        )
+        rst_file.write(
+            u".. raw:: html\n\n"
+            f'    <iframe frameborder="0" scrolling="no" '
+            f'width="1600" height="1000" '
+            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
+            f'</iframe>\n\n'
+        )
+        if legend:
+            rst_file.write(legend[1:].replace(u"\n", u" |br| "))
+        if footnote:
+            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
 
 
 def table_perf_comparison(table, input_data):
@@ -575,44 +630,98 @@ def table_perf_comparison(table, input_data):
 
     # Prepare the header of the tables
     try:
-        header = [u"Test case", ]
+        header = [u"Test Case", ]
+        legend = u"\nLegend:\n"
 
-        if table[u"include-tests"] == u"MRR":
-            hdr_param = u"Rec Rate"
-        else:
-            hdr_param = u"Thput"
+        rca_data = None
+        rca = table.get(u"rca", None)
+        if rca:
+            try:
+                with open(rca.get(u"data-file", ""), u"r") as rca_file:
+                    rca_data = load(rca_file, Loader=FullLoader)
+                header.insert(0, rca.get(u"title", "RCA"))
+                legend += (
+                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
+                )
+            except (YAMLError, IOError) as err:
+                logging.warning(repr(err))
 
         history = table.get(u"history", list())
         for item in history:
             header.extend(
                 [
-                    f"{item[u'title']} {hdr_param} [Mpps]",
-                    f"{item[u'title']} Stdev [Mpps]"
+                    f"{item[u'title']} Avg({table[u'include-tests']})",
+                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                 ]
             )
+            legend += (
+                f"{item[u'title']} Avg({table[u'include-tests']}): "
+                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
+                f"a series of runs of the listed tests executed against "
+                f"{item[u'title']}.\n"
+                f"{item[u'title']} Stdev({table[u'include-tests']}): "
+                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+                f"computed from a series of runs of the listed tests executed "
+                f"against {item[u'title']}.\n"
+            )
         header.extend(
             [
-                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
-                f"{table[u'reference'][u'title']} Stdev [Mpps]",
-                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
-                f"{table[u'compare'][u'title']} Stdev [Mpps]",
-                u"Delta [%]"
+                f"{table[u'reference'][u'title']} "
+                f"Avg({table[u'include-tests']})",
+                f"{table[u'reference'][u'title']} "
+                f"Stdev({table[u'include-tests']})",
+                f"{table[u'compare'][u'title']} "
+                f"Avg({table[u'include-tests']})",
+                f"{table[u'compare'][u'title']} "
+                f"Stdev({table[u'include-tests']})",
+                f"Diff({table[u'reference'][u'title']},"
+                f"{table[u'compare'][u'title']})",
+                u"Stdev(Diff)"
             ]
         )
-        header_str = u",".join(header) + u"\n"
+        header_str = u";".join(header) + u"\n"
+        legend += (
+            f"{table[u'reference'][u'title']} "
+            f"Avg({table[u'include-tests']}): "
+            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+            f"series of runs of the listed tests executed against "
+            f"{table[u'reference'][u'title']}.\n"
+            f"{table[u'reference'][u'title']} "
+            f"Stdev({table[u'include-tests']}): "
+            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+            f"computed from a series of runs of the listed tests executed "
+            f"against {table[u'reference'][u'title']}.\n"
+            f"{table[u'compare'][u'title']} "
+            f"Avg({table[u'include-tests']}): "
+            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+            f"series of runs of the listed tests executed against "
+            f"{table[u'compare'][u'title']}.\n"
+            f"{table[u'compare'][u'title']} "
+            f"Stdev({table[u'include-tests']}): "
+            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+            f"computed from a series of runs of the listed tests executed "
+            f"against {table[u'compare'][u'title']}.\n"
+            f"Diff({table[u'reference'][u'title']},"
+            f"{table[u'compare'][u'title']}): "
+            f"Percentage change calculated for mean values.\n"
+            u"Stdev(Diff): "
+            u"Standard deviation of percentage change calculated for mean "
+            u"values.\n"
+            u"NT: Not Tested\n"
+        )
     except (AttributeError, KeyError) as err:
         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
         return
 
     # Prepare data for the table:
     tbl_dict = dict()
-    # topo = ""
     for job, builds in table[u"reference"][u"data"].items():
-        # topo = u"2n-skx" if u"2n-skx" in job else u""
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].items():
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
@@ -640,7 +749,9 @@ def table_perf_comparison(table, input_data):
             for build in builds:
                 for tst_name, tst_data in rpl_data[job][str(build)].items():
                     tst_name_mod = _tpc_modify_test_name(tst_name)
-                    if u"across topologies" in table[u"title"].lower():
+                    if (u"across topologies" in table[u"title"].lower() or
+                            (u" 3n-" in table[u"title"].lower() and
+                             u" 2n-" in table[u"title"].lower())):
                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                     if tbl_dict.get(tst_name_mod, None) is None:
                         name = \
@@ -667,7 +778,9 @@ def table_perf_comparison(table, input_data):
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].items():
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
@@ -697,7 +810,9 @@ def table_perf_comparison(table, input_data):
             for build in builds:
                 for tst_name, tst_data in rpl_data[job][str(build)].items():
                     tst_name_mod = _tpc_modify_test_name(tst_name)
-                    if u"across topologies" in table[u"title"].lower():
+                    if (u"across topologies" in table[u"title"].lower() or
+                            (u" 3n-" in table[u"title"].lower() and
+                             u" 2n-" in table[u"title"].lower())):
                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                     if tbl_dict.get(tst_name_mod, None) is None:
                         name = \
@@ -725,7 +840,9 @@ def table_perf_comparison(table, input_data):
             for build in builds:
                 for tst_name, tst_data in data[job][str(build)].items():
                     tst_name_mod = _tpc_modify_test_name(tst_name)
-                    if u"across topologies" in table[u"title"].lower():
+                    if (u"across topologies" in table[u"title"].lower() or
+                            (u" 3n-" in table[u"title"].lower() and
+                             u" 2n-" in table[u"title"].lower())):
                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                     if tbl_dict.get(tst_name_mod, None) is None:
                         continue
@@ -737,7 +854,8 @@ def table_perf_comparison(table, input_data):
                             u"title"]] = list()
                     try:
                         if table[u"include-tests"] == u"MRR":
-                            res = tst_data[u"result"][u"receive-rate"]
+                            res = (tst_data[u"result"][u"receive-rate"],
+                                   tst_data[u"result"][u"receive-stdev"])
                         elif table[u"include-tests"] == u"PDR":
                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                         elif table[u"include-tests"] == u"NDR":
@@ -750,41 +868,71 @@ def table_perf_comparison(table, input_data):
                         pass
 
     tbl_lst = list()
-    footnote = False
     for tst_name in tbl_dict:
         item = [tbl_dict[tst_name][u"name"], ]
         if history:
             if tbl_dict[tst_name].get(u"history", None) is not None:
                 for hist_data in tbl_dict[tst_name][u"history"].values():
                     if hist_data:
-                        item.append(round(mean(hist_data) / 1000000, 2))
-                        item.append(round(stdev(hist_data) / 1000000, 2))
+                        if table[u"include-tests"] == u"MRR":
+                            item.append(round(hist_data[0][0] / 1e6, 1))
+                            item.append(round(hist_data[0][1] / 1e6, 1))
+                        else:
+                            item.append(round(mean(hist_data) / 1e6, 1))
+                            item.append(round(stdev(hist_data) / 1e6, 1))
                     else:
-                        item.extend([u"Not tested", u"Not tested"])
+                        item.extend([u"NT", u"NT"])
+            else:
+                item.extend([u"NT", u"NT"])
+        data_r = tbl_dict[tst_name][u"ref-data"]
+        if data_r:
+            if table[u"include-tests"] == u"MRR":
+                data_r_mean = data_r[0][0]
+                data_r_stdev = data_r[0][1]
             else:
-                item.extend([u"Not tested", u"Not tested"])
-        data_t = tbl_dict[tst_name][u"ref-data"]
-        if data_t:
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+                data_r_mean = mean(data_r)
+                data_r_stdev = stdev(data_r)
+            item.append(round(data_r_mean / 1e6, 1))
+            item.append(round(data_r_stdev / 1e6, 1))
         else:
-            item.extend([u"Not tested", u"Not tested"])
-        data_t = tbl_dict[tst_name][u"cmp-data"]
-        if data_t:
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+            data_r_mean = None
+            data_r_stdev = None
+            item.extend([u"NT", u"NT"])
+        data_c = tbl_dict[tst_name][u"cmp-data"]
+        if data_c:
+            if table[u"include-tests"] == u"MRR":
+                data_c_mean = data_c[0][0]
+                data_c_stdev = data_c[0][1]
+            else:
+                data_c_mean = mean(data_c)
+                data_c_stdev = stdev(data_c)
+            item.append(round(data_c_mean / 1e6, 1))
+            item.append(round(data_c_stdev / 1e6, 1))
         else:
-            item.extend([u"Not tested", u"Not tested"])
-        if item[-2] == u"Not tested":
+            data_c_mean = None
+            data_c_stdev = None
+            item.extend([u"NT", u"NT"])
+        if item[-2] == u"NT":
             pass
-        elif item[-4] == u"Not tested":
+        elif item[-4] == u"NT":
             item.append(u"New in CSIT-2001")
-        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
-        #     item.append(u"See footnote [1]")
-        #     footnote = True
-        elif item[-4] != 0:
-            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
-        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
+            item.append(u"New in CSIT-2001")
+        elif data_r_mean is not None and data_c_mean is not None:
+            delta, d_stdev = relative_change_stdev(
+                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
+            )
+            try:
+                item.append(round(delta))
+            except ValueError:
+                item.append(delta)
+            try:
+                item.append(round(d_stdev))
+            except ValueError:
+                item.append(d_stdev)
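+        # Prepend the RCA reference (if available) as the first column.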
+        if rca_data:
+            rca_nr = rca_data.get(item[0], u"-")
+            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+        if (len(item) == len(header)) and (item[-4] != u"NT"):
             tbl_lst.append(item)
 
     tbl_lst = _tpc_sort_table(tbl_lst)
@@ -794,27 +942,28 @@ def table_perf_comparison(table, input_data):
     with open(csv_file, u"wt") as file_handler:
         file_handler.write(header_str)
         for test in tbl_lst:
-            file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
 
     txt_file_name = f"{table[u'output-file']}.txt"
-    convert_csv_to_pretty_txt(csv_file, txt_file_name)
-
-    if footnote:
-        with open(txt_file_name, u'a') as txt_file:
-            txt_file.writelines([
-                u"\nFootnotes:\n",
-                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
-                u"2-node testbeds, dot1q encapsulation is now used on both "
-                u"links of SUT.\n",
-                u"    Previously dot1q was used only on a single link with the "
-                u"other link carrying untagged Ethernet frames. This changes "
-                u"results\n",
-                u"    in slightly lower throughput in CSIT-1908 for these "
-                u"tests. See release notes."
-            ])
+    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
+
+    footnote = u""
+    with open(txt_file_name, u'a') as txt_file:
+        txt_file.write(legend)
+        if rca_data:
+            footnote = rca_data.get(u"footnote", u"")
+            if footnote:
+                txt_file.write(footnote)
+        txt_file.write(u":END")
 
     # Generate html table:
-    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+    _tpc_generate_html_table(
+        header,
+        tbl_lst,
+        table[u'output-file'],
+        legend=legend,
+        footnote=footnote
+    )
 
 
 def table_perf_comparison_nic(table, input_data):
@@ -838,46 +987,100 @@ def table_perf_comparison_nic(table, input_data):
 
     # Prepare the header of the tables
     try:
-        header = [u"Test case", ]
+        header = [u"Test Case", ]
+        legend = u"\nLegend:\n"
 
-        if table[u"include-tests"] == u"MRR":
-            hdr_param = u"Rec Rate"
-        else:
-            hdr_param = u"Thput"
+        rca_data = None
+        rca = table.get(u"rca", None)
+        if rca:
+            try:
+                with open(rca.get(u"data-file", ""), u"r") as rca_file:
+                    rca_data = load(rca_file, Loader=FullLoader)
+                header.insert(0, rca.get(u"title", "RCA"))
+                legend += (
+                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
+                )
+            except (YAMLError, IOError) as err:
+                logging.warning(repr(err))
 
         history = table.get(u"history", list())
         for item in history:
             header.extend(
                 [
-                    f"{item[u'title']} {hdr_param} [Mpps]",
-                    f"{item[u'title']} Stdev [Mpps]"
+                    f"{item[u'title']} Avg({table[u'include-tests']})",
+                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                 ]
             )
+            legend += (
+                f"{item[u'title']} Avg({table[u'include-tests']}): "
+                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
+                f"a series of runs of the listed tests executed against "
+                f"{item[u'title']}.\n"
+                f"{item[u'title']} Stdev({table[u'include-tests']}): "
+                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+                f"computed from a series of runs of the listed tests executed "
+                f"against {item[u'title']}.\n"
+            )
         header.extend(
             [
-                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
-                f"{table[u'reference'][u'title']} Stdev [Mpps]",
-                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
-                f"{table[u'compare'][u'title']} Stdev [Mpps]",
-                u"Delta [%]"
+                f"{table[u'reference'][u'title']} "
+                f"Avg({table[u'include-tests']})",
+                f"{table[u'reference'][u'title']} "
+                f"Stdev({table[u'include-tests']})",
+                f"{table[u'compare'][u'title']} "
+                f"Avg({table[u'include-tests']})",
+                f"{table[u'compare'][u'title']} "
+                f"Stdev({table[u'include-tests']})",
+                f"Diff({table[u'reference'][u'title']},"
+                f"{table[u'compare'][u'title']})",
+                u"Stdev(Diff)"
             ]
         )
-        header_str = u",".join(header) + u"\n"
+        header_str = u";".join(header) + u"\n"
+        legend += (
+            f"{table[u'reference'][u'title']} "
+            f"Avg({table[u'include-tests']}): "
+            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+            f"series of runs of the listed tests executed against "
+            f"{table[u'reference'][u'title']}.\n"
+            f"{table[u'reference'][u'title']} "
+            f"Stdev({table[u'include-tests']}): "
+            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+            f"computed from a series of runs of the listed tests executed "
+            f"against {table[u'reference'][u'title']}.\n"
+            f"{table[u'compare'][u'title']} "
+            f"Avg({table[u'include-tests']}): "
+            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+            f"series of runs of the listed tests executed against "
+            f"{table[u'compare'][u'title']}.\n"
+            f"{table[u'compare'][u'title']} "
+            f"Stdev({table[u'include-tests']}): "
+            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+            f"computed from a series of runs of the listed tests executed "
+            f"against {table[u'compare'][u'title']}.\n"
+            f"Diff({table[u'reference'][u'title']},"
+            f"{table[u'compare'][u'title']}): "
+            f"Percentage change calculated for mean values.\n"
+            u"Stdev(Diff): "
+            u"Standard deviation of percentage change calculated for mean "
+            u"values.\n"
+            u"NT: Not Tested\n"
+        )
     except (AttributeError, KeyError) as err:
         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
         return
 
     # Prepare data for the table:
     tbl_dict = dict()
-    # topo = u""
     for job, builds in table[u"reference"][u"data"].items():
-        # topo = u"2n-skx" if u"2n-skx" in job else u""
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].items():
                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                     continue
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
@@ -906,7 +1109,9 @@ def table_perf_comparison_nic(table, input_data):
                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                         continue
                     tst_name_mod = _tpc_modify_test_name(tst_name)
-                    if u"across topologies" in table[u"title"].lower():
+                    if (u"across topologies" in table[u"title"].lower() or
+                            (u" 3n-" in table[u"title"].lower() and
+                             u" 2n-" in table[u"title"].lower())):
                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                     if tbl_dict.get(tst_name_mod, None) is None:
                         name = \
@@ -935,7 +1140,9 @@ def table_perf_comparison_nic(table, input_data):
                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                     continue
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
@@ -964,7 +1171,9 @@ def table_perf_comparison_nic(table, input_data):
                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                         continue
                     tst_name_mod = _tpc_modify_test_name(tst_name)
-                    if u"across topologies" in table[u"title"].lower():
+                    if (u"across topologies" in table[u"title"].lower() or
+                            (u" 3n-" in table[u"title"].lower() and
+                             u" 2n-" in table[u"title"].lower())):
                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                     if tbl_dict.get(tst_name_mod, None) is None:
                         name = \
@@ -994,7 +1203,9 @@ def table_perf_comparison_nic(table, input_data):
                     if item[u"nic"] not in tst_data[u"tags"]:
                         continue
                     tst_name_mod = _tpc_modify_test_name(tst_name)
-                    if u"across topologies" in table[u"title"].lower():
+                    if (u"across topologies" in table[u"title"].lower() or
+                            (u" 3n-" in table[u"title"].lower() and
+                             u" 2n-" in table[u"title"].lower())):
                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                     if tbl_dict.get(tst_name_mod, None) is None:
                         continue
@@ -1006,7 +1217,8 @@ def table_perf_comparison_nic(table, input_data):
                             u"title"]] = list()
                     try:
                         if table[u"include-tests"] == u"MRR":
-                            res = tst_data[u"result"][u"receive-rate"]
+                            res = (tst_data[u"result"][u"receive-rate"],
+                                   tst_data[u"result"][u"receive-stdev"])
                         elif table[u"include-tests"] == u"PDR":
                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                         elif table[u"include-tests"] == u"NDR":
@@ -1019,41 +1231,71 @@ def table_perf_comparison_nic(table, input_data):
                         pass
 
     tbl_lst = list()
-    footnote = False
     for tst_name in tbl_dict:
         item = [tbl_dict[tst_name][u"name"], ]
         if history:
             if tbl_dict[tst_name].get(u"history", None) is not None:
                 for hist_data in tbl_dict[tst_name][u"history"].values():
                     if hist_data:
-                        item.append(round(mean(hist_data) / 1000000, 2))
-                        item.append(round(stdev(hist_data) / 1000000, 2))
+                        if table[u"include-tests"] == u"MRR":
+                            item.append(round(hist_data[0][0] / 1e6, 1))
+                            item.append(round(hist_data[0][1] / 1e6, 1))
+                        else:
+                            item.append(round(mean(hist_data) / 1e6, 1))
+                            item.append(round(stdev(hist_data) / 1e6, 1))
                     else:
-                        item.extend([u"Not tested", u"Not tested"])
+                        item.extend([u"NT", u"NT"])
+            else:
+                item.extend([u"NT", u"NT"])
+        data_r = tbl_dict[tst_name][u"ref-data"]
+        if data_r:
+            if table[u"include-tests"] == u"MRR":
+                data_r_mean = data_r[0][0]
+                data_r_stdev = data_r[0][1]
             else:
-                item.extend([u"Not tested", u"Not tested"])
-        data_t = tbl_dict[tst_name][u"ref-data"]
-        if data_t:
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+                data_r_mean = mean(data_r)
+                data_r_stdev = stdev(data_r)
+            item.append(round(data_r_mean / 1e6, 1))
+            item.append(round(data_r_stdev / 1e6, 1))
         else:
-            item.extend([u"Not tested", u"Not tested"])
-        data_t = tbl_dict[tst_name][u"cmp-data"]
-        if data_t:
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+            data_r_mean = None
+            data_r_stdev = None
+            item.extend([u"NT", u"NT"])
+        data_c = tbl_dict[tst_name][u"cmp-data"]
+        if data_c:
+            if table[u"include-tests"] == u"MRR":
+                data_c_mean = data_c[0][0]
+                data_c_stdev = data_c[0][1]
+            else:
+                data_c_mean = mean(data_c)
+                data_c_stdev = stdev(data_c)
+            item.append(round(data_c_mean / 1e6, 1))
+            item.append(round(data_c_stdev / 1e6, 1))
         else:
-            item.extend([u"Not tested", u"Not tested"])
-        if item[-2] == u"Not tested":
+            data_c_mean = None
+            data_c_stdev = None
+            item.extend([u"NT", u"NT"])
+        if item[-2] == u"NT":
             pass
-        elif item[-4] == u"Not tested":
+        elif item[-4] == u"NT":
+            item.append(u"New in CSIT-2001")
             item.append(u"New in CSIT-2001")
-        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
-        #     item.append(u"See footnote [1]")
-        #     footnote = True
-        elif item[-4] != 0:
-            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
-        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
+        elif data_r_mean is not None and data_c_mean is not None:
+            delta, d_stdev = relative_change_stdev(
+                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
+            )
+            try:
+                item.append(round(delta))
+            except ValueError:
+                item.append(delta)
+            try:
+                item.append(round(d_stdev))
+            except ValueError:
+                item.append(d_stdev)
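+        # Prepend the RCA reference (if available) as the first column.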
+        if rca_data:
+            rca_nr = rca_data.get(item[0], u"-")
+            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+        if (len(item) == len(header)) and (item[-4] != u"NT"):
             tbl_lst.append(item)
 
     tbl_lst = _tpc_sort_table(tbl_lst)
@@ -1063,27 +1305,28 @@ def table_perf_comparison_nic(table, input_data):
     with open(csv_file, u"wt") as file_handler:
         file_handler.write(header_str)
         for test in tbl_lst:
-            file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
 
     txt_file_name = f"{table[u'output-file']}.txt"
-    convert_csv_to_pretty_txt(csv_file, txt_file_name)
-
-    if footnote:
-        with open(txt_file_name, u'a') as txt_file:
-            txt_file.writelines([
-                u"\nFootnotes:\n",
-                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
-                u"2-node testbeds, dot1q encapsulation is now used on both "
-                u"links of SUT.\n",
-                u"    Previously dot1q was used only on a single link with the "
-                u"other link carrying untagged Ethernet frames. This changes "
-                u"results\n",
-                u"    in slightly lower throughput in CSIT-1908 for these "
-                u"tests. See release notes."
-            ])
+    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
+
+    footnote = u""
+    with open(txt_file_name, u'a') as txt_file:
+        txt_file.write(legend)
+        if rca_data:
+            footnote = rca_data.get(u"footnote", u"")
+            if footnote:
+                txt_file.write(footnote)
+        txt_file.write(u":END")
 
     # Generate html table:
-    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+    _tpc_generate_html_table(
+        header,
+        tbl_lst,
+        table[u'output-file'],
+        legend=legend,
+        footnote=footnote
+    )
 
 
 def table_nics_comparison(table, input_data):
@@ -1107,21 +1350,49 @@ def table_nics_comparison(table, input_data):
 
     # Prepare the header of the tables
     try:
-        header = [u"Test case", ]
-
-        if table[u"include-tests"] == u"MRR":
-            hdr_param = u"Rec Rate"
-        else:
-            hdr_param = u"Thput"
-
-        header.extend(
-            [
-                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
-                f"{table[u'reference'][u'title']} Stdev [Mpps]",
-                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
-                f"{table[u'compare'][u'title']} Stdev [Mpps]",
-                u"Delta [%]"
-            ]
+        header = [
+            u"Test Case",
+            f"{table[u'reference'][u'title']} "
+            f"Avg({table[u'include-tests']})",
+            f"{table[u'reference'][u'title']} "
+            f"Stdev({table[u'include-tests']})",
+            f"{table[u'compare'][u'title']} "
+            f"Avg({table[u'include-tests']})",
+            f"{table[u'compare'][u'title']} "
+            f"Stdev({table[u'include-tests']})",
+            f"Diff({table[u'reference'][u'title']},"
+            f"{table[u'compare'][u'title']})",
+            u"Stdev(Diff)"
+        ]
+        legend = (
+            u"\nLegend:\n"
+            f"{table[u'reference'][u'title']} "
+            f"Avg({table[u'include-tests']}): "
+            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+            f"series of runs of the listed tests executed using "
+            f"{table[u'reference'][u'title']} NIC.\n"
+            f"{table[u'reference'][u'title']} "
+            f"Stdev({table[u'include-tests']}): "
+            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+            f"computed from a series of runs of the listed tests executed "
+            f"using {table[u'reference'][u'title']} NIC.\n"
+            f"{table[u'compare'][u'title']} "
+            f"Avg({table[u'include-tests']}): "
+            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+            f"series of runs of the listed tests executed using "
+            f"{table[u'compare'][u'title']} NIC.\n"
+            f"{table[u'compare'][u'title']} "
+            f"Stdev({table[u'include-tests']}): "
+            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+            f"computed from a series of runs of the listed tests executed "
+            f"using {table[u'compare'][u'title']} NIC.\n"
+            f"Diff({table[u'reference'][u'title']},"
+            f"{table[u'compare'][u'title']}): "
+            f"Percentage change calculated for mean values.\n"
+            u"Stdev(Diff): "
+            u"Standard deviation of percentage change calculated for mean "
+            u"values.\n"
+            u":END"
         )
 
     except (AttributeError, KeyError) as err:
@@ -1142,9 +1413,9 @@ def table_nics_comparison(table, input_data):
                         u"cmp-data": list()
                     }
                 try:
-                    result = None
                     if table[u"include-tests"] == u"MRR":
-                        result = tst_data[u"result"][u"receive-rate"]
+                        result = (tst_data[u"result"][u"receive-rate"],
+                                  tst_data[u"result"][u"receive-stdev"])
                     elif table[u"include-tests"] == u"PDR":
                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                     elif table[u"include-tests"] == u"NDR":
@@ -1165,21 +1436,46 @@ def table_nics_comparison(table, input_data):
     tbl_lst = list()
     for tst_name in tbl_dict:
         item = [tbl_dict[tst_name][u"name"], ]
-        data_t = tbl_dict[tst_name][u"ref-data"]
-        if data_t:
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+        data_r = tbl_dict[tst_name][u"ref-data"]
+        if data_r:
+            if table[u"include-tests"] == u"MRR":
+                data_r_mean = data_r[0][0]
+                data_r_stdev = data_r[0][1]
+            else:
+                data_r_mean = mean(data_r)
+                data_r_stdev = stdev(data_r)
+            item.append(round(data_r_mean / 1e6, 1))
+            item.append(round(data_r_stdev / 1e6, 1))
         else:
+            data_r_mean = None
+            data_r_stdev = None
             item.extend([None, None])
-        data_t = tbl_dict[tst_name][u"cmp-data"]
-        if data_t:
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+        data_c = tbl_dict[tst_name][u"cmp-data"]
+        if data_c:
+            if table[u"include-tests"] == u"MRR":
+                data_c_mean = data_c[0][0]
+                data_c_stdev = data_c[0][1]
+            else:
+                data_c_mean = mean(data_c)
+                data_c_stdev = stdev(data_c)
+            item.append(round(data_c_mean / 1e6, 1))
+            item.append(round(data_c_stdev / 1e6, 1))
         else:
+            data_c_mean = None
+            data_c_stdev = None
             item.extend([None, None])
-        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
-            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
-        if len(item) == len(header):
+        if data_r_mean is not None and data_c_mean is not None:
+            delta, d_stdev = relative_change_stdev(
+                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
+            )
+            try:
+                item.append(round(delta))
+            except ValueError:
+                item.append(delta)
+            try:
+                item.append(round(d_stdev))
+            except ValueError:
+                item.append(d_stdev)
             tbl_lst.append(item)
 
     # Sort the table according to the relative change
@@ -1187,15 +1483,24 @@ def table_nics_comparison(table, input_data):
 
     # Generate csv tables:
     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
-        file_handler.write(u",".join(header) + u"\n")
+        file_handler.write(u";".join(header) + u"\n")
         for test in tbl_lst:
-            file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
 
     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
-                              f"{table[u'output-file']}.txt")
+                              f"{table[u'output-file']}.txt",
+                              delimiter=u";")
+
+    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
+        txt_file.write(legend)
 
     # Generate html table:
-    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+    _tpc_generate_html_table(
+        header,
+        tbl_lst,
+        table[u'output-file'],
+        legend=legend
+    )
 
 
 def table_soak_vs_ndr(table, input_data):
@@ -1220,14 +1525,37 @@ def table_soak_vs_ndr(table, input_data):
     # Prepare the header of the table
     try:
         header = [
-            u"Test case",
-            f"{table[u'reference'][u'title']} Thput [Mpps]",
-            f"{table[u'reference'][u'title']} Stdev [Mpps]",
-            f"{table[u'compare'][u'title']} Thput [Mpps]",
-            f"{table[u'compare'][u'title']} Stdev [Mpps]",
-            u"Delta [%]", u"Stdev of delta [%]"
+            u"Test Case",
+            f"Avg({table[u'reference'][u'title']})",
+            f"Stdev({table[u'reference'][u'title']})",
+            f"Avg({table[u'compare'][u'title']})",
+            f"Stdev{table[u'compare'][u'title']})",
+            u"Diff",
+            u"Stdev(Diff)"
         ]
-        header_str = u",".join(header) + u"\n"
+        header_str = u";".join(header) + u"\n"
+        legend = (
+            u"\nLegend:\n"
+            f"Avg({table[u'reference'][u'title']}): "
+            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
+            f"from a series of runs of the listed tests.\n"
+            f"Stdev({table[u'reference'][u'title']}): "
+            f"Standard deviation value of {table[u'reference'][u'title']} "
+            f"[Mpps] computed from a series of runs of the listed tests.\n"
+            f"Avg({table[u'compare'][u'title']}): "
+            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
+            f"a series of runs of the listed tests.\n"
+            f"Stdev({table[u'compare'][u'title']}): "
+            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
+            f"computed from a series of runs of the listed tests.\n"
+            f"Diff({table[u'reference'][u'title']},"
+            f"{table[u'compare'][u'title']}): "
+            f"Percentage change calculated for mean values.\n"
+            u"Stdev(Diff): "
+            u"Standard deviation of percentage change calculated for mean "
+            u"values.\n"
+            u":END"
+        )
     except (AttributeError, KeyError) as err:
         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
         return
@@ -1270,7 +1598,8 @@ def table_soak_vs_ndr(table, input_data):
                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                         continue
                     if table[u"include-tests"] == u"MRR":
-                        result = tst_data[u"result"][u"receive-rate"]
+                        result = (tst_data[u"result"][u"receive-rate"],
+                                  tst_data[u"result"][u"receive-stdev"])
                     elif table[u"include-tests"] == u"PDR":
                         result = \
                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
@@ -1290,29 +1619,43 @@ def table_soak_vs_ndr(table, input_data):
         item = [tbl_dict[tst_name][u"name"], ]
         data_r = tbl_dict[tst_name][u"ref-data"]
         if data_r:
-            data_r_mean = mean(data_r)
-            item.append(round(data_r_mean / 1000000, 2))
-            data_r_stdev = stdev(data_r)
-            item.append(round(data_r_stdev / 1000000, 2))
+            if table[u"include-tests"] == u"MRR":
+                data_r_mean = data_r[0][0]
+                data_r_stdev = data_r[0][1]
+            else:
+                data_r_mean = mean(data_r)
+                data_r_stdev = stdev(data_r)
+            item.append(round(data_r_mean / 1e6, 1))
+            item.append(round(data_r_stdev / 1e6, 1))
         else:
             data_r_mean = None
             data_r_stdev = None
             item.extend([None, None])
         data_c = tbl_dict[tst_name][u"cmp-data"]
         if data_c:
-            data_c_mean = mean(data_c)
-            item.append(round(data_c_mean / 1000000, 2))
-            data_c_stdev = stdev(data_c)
-            item.append(round(data_c_stdev / 1000000, 2))
+            if table[u"include-tests"] == u"MRR":
+                data_c_mean = data_c[0][0]
+                data_c_stdev = data_c[0][1]
+            else:
+                data_c_mean = mean(data_c)
+                data_c_stdev = stdev(data_c)
+            item.append(round(data_c_mean / 1e6, 1))
+            item.append(round(data_c_stdev / 1e6, 1))
         else:
             data_c_mean = None
             data_c_stdev = None
             item.extend([None, None])
-        if data_r_mean and data_c_mean:
+        if data_r_mean is not None and data_c_mean is not None:
             delta, d_stdev = relative_change_stdev(
                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
-            item.append(round(delta, 2))
-            item.append(round(d_stdev, 2))
+            try:
+                item.append(round(delta))
+            except ValueError:
+                item.append(delta)
+            try:
+                item.append(round(d_stdev))
+            except ValueError:
+                item.append(d_stdev)
             tbl_lst.append(item)
 
     # Sort the table according to the relative change
@@ -1323,12 +1666,21 @@ def table_soak_vs_ndr(table, input_data):
     with open(csv_file, u"wt") as file_handler:
         file_handler.write(header_str)
         for test in tbl_lst:
-            file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
 
-    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
+    convert_csv_to_pretty_txt(
+        csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
+    )
+    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
+        txt_file.write(legend)
 
     # Generate html table:
-    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+    _tpc_generate_html_table(
+        header,
+        tbl_lst,
+        table[u'output-file'],
+        legend=legend
+    )
 
 
 def table_perf_trending_dash(table, input_data):
@@ -1424,7 +1776,7 @@ def table_perf_trending_dash(table, input_data):
                 continue
             tbl_lst.append(
                 [tbl_dict[tst_name][u"name"],
-                 round(last_avg / 1000000, 2),
+                 round(last_avg / 1e6, 2),
                  rel_change_last,
                  rel_change_long,
                  classification_lst[-win_size:].count(u"regression"),