PAL: Add debug output to table_weekly_comparison
[csit.git] resources/tools/presentation/generator_tables.py
index fe0eaaa..82e59c4 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -17,6 +17,7 @@
 
 import logging
 import csv
+import math
 import re
 
 from collections import OrderedDict
@@ -24,11 +25,11 @@ from xml.etree import ElementTree as ET
 from datetime import datetime as dt
 from datetime import timedelta
 from copy import deepcopy
-from json import loads
 
 import plotly.graph_objects as go
 import plotly.offline as ploff
 import pandas as pd
+import prettytable
 
 from numpy import nan, isnan
 from yaml import load, FullLoader, YAMLError
@@ -39,6 +40,8 @@ from pal_utils import mean, stdev, classify_anomalies, \
 
 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
 
+NORM_FREQ = 2.0  # [GHz]
+
 
 def generate_tables(spec, data):
     """Generate all tables specified in the specification file.
@@ -50,30 +53,128 @@ def generate_tables(spec, data):
     """
 
     generator = {
-        u"table_merged_details": table_merged_details,
-        u"table_soak_vs_ndr": table_soak_vs_ndr,
-        u"table_perf_trending_dash": table_perf_trending_dash,
-        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
-        u"table_last_failed_tests": table_last_failed_tests,
-        u"table_failed_tests": table_failed_tests,
-        u"table_failed_tests_html": table_failed_tests_html,
-        u"table_oper_data_html": table_oper_data_html,
-        u"table_comparison": table_comparison,
-        u"table_weekly_comparison": table_weekly_comparison
+        "table_merged_details": table_merged_details,
+        "table_soak_vs_ndr": table_soak_vs_ndr,
+        "table_perf_trending_dash": table_perf_trending_dash,
+        "table_perf_trending_dash_html": table_perf_trending_dash_html,
+        "table_last_failed_tests": table_last_failed_tests,
+        "table_failed_tests": table_failed_tests,
+        "table_failed_tests_html": table_failed_tests_html,
+        "table_oper_data_html": table_oper_data_html,
+        "table_comparison": table_comparison,
+        "table_weekly_comparison": table_weekly_comparison,
+        "table_job_spec_duration": table_job_spec_duration
     }
 
     logging.info(u"Generating the tables ...")
+
+    norm_factor = dict()
+    for key, val in spec.environment.get("frequency", dict()).items():
+        norm_factor[key] = NORM_FREQ / val
+
     for table in spec.tables:
         try:
-            if table[u"algorithm"] == u"table_weekly_comparison":
-                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
-            generator[table[u"algorithm"]](table, data)
+            if table["algorithm"] == "table_weekly_comparison":
+                table["testbeds"] = spec.environment.get("testbeds", None)
+            if table["algorithm"] == "table_comparison":
+                table["norm_factor"] = norm_factor
+            generator[table["algorithm"]](table, data)
         except NameError as err:
             logging.error(
-                f"Probably algorithm {table[u'algorithm']} is not defined: "
+                f"Probably algorithm {table['algorithm']} is not defined: "
                 f"{repr(err)}"
             )
-    logging.info(u"Done.")
+    logging.info("Done.")
+
+
+def table_job_spec_duration(table, input_data):
+    """Generate the table(s) with algorithm: table_job_spec_duration
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    _ = input_data
+
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
+    jb_type = table.get(u"jb-type", None)
+
+    tbl_lst = list()
+    if jb_type == u"iterative":
+        for line in table.get(u"lines", tuple()):
+            tbl_itm = {
+                u"name": line.get(u"job-spec", u""),
+                u"data": list()
+            }
+            for job, builds in line.get(u"data-set", dict()).items():
+                for build_nr in builds:
+                    try:
+                        minutes = input_data.metadata(
+                            job, str(build_nr)
+                        )[u"elapsedtime"] // 60000
+                    except (KeyError, IndexError, ValueError, AttributeError):
+                        continue
+                    tbl_itm[u"data"].append(minutes)
+            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
+            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
+            tbl_lst.append(tbl_itm)
+    elif jb_type == u"coverage":
+        job = table.get(u"data", None)
+        if not job:
+            return
+        for line in table.get(u"lines", tuple()):
+            try:
+                tbl_itm = {
+                    u"name": line.get(u"job-spec", u""),
+                    u"mean": input_data.metadata(
+                        list(job.keys())[0], str(line[u"build"])
+                    )[u"elapsedtime"] // 60000,
+                    u"stdev": float(u"nan")
+                }
+                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
+            except (KeyError, IndexError, ValueError, AttributeError):
+                continue
+            tbl_lst.append(tbl_itm)
+    else:
+        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
+        return
+
+    for line in tbl_lst:
+        line[u"mean"] = \
+            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
+        if math.isnan(line[u"stdev"]):
+            line[u"stdev"] = u""
+        else:
+            line[u"stdev"] = \
+                f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
+
+    if not tbl_lst:
+        return
+
+    rows = list()
+    for itm in tbl_lst:
+        rows.append([
+            itm[u"name"],
+            f"{len(itm[u'data'])}",
+            f"{itm[u'mean']} +- {itm[u'stdev']}"
+            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
+        ])
+
+    txt_table = prettytable.PrettyTable(
+        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
+    )
+    for row in rows:
+        txt_table.add_row(row)
+    txt_table.align = u"r"
+    txt_table.align[u"Job Specification"] = u"l"
+
+    file_name = f"{table.get(u'output-file', u'')}.txt"
+    with open(file_name, u"wt", encoding='utf-8') as txt_file:
+        txt_file.write(str(txt_table))
 
 
 def table_oper_data_html(table, input_data):
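
The new table_job_spec_duration() above converts the Jenkins "elapsedtime" metadata (milliseconds) into minutes and renders mean and stdev as HH:MM. A minimal standalone sketch of that conversion, using invented build durations and the statistics module in place of the pal_utils helpers:

    from statistics import mean, stdev  # stand-ins for pal_utils.mean/stdev

    def to_hh_mm(minutes):
        # Render a duration given in minutes as HH:MM.
        return f"{int(minutes // 60):02d}:{int(minutes % 60):02d}"

    elapsed_ms = [5_520_000, 6_180_000, 5_940_000]  # example elapsedtime values
    minutes = [ms // 60000 for ms in elapsed_ms]    # same conversion as above

    print(f"{to_hh_mm(mean(minutes))} +- {to_hh_mm(stdev(minutes))}")
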
@@ -205,28 +306,15 @@ def table_oper_data_html(table, input_data):
             threads = dict({idx: list() for idx in range(len(runtime))})
             for idx, run_data in runtime.items():
                 for gnode, gdata in run_data.items():
-                    if gdata[u"vectors"] > 0:
-                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
-                    elif gdata[u"calls"] > 0:
-                        clocks = gdata[u"clocks"] / gdata[u"calls"]
-                    elif gdata[u"suspends"] > 0:
-                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
-                    else:
-                        clocks = 0.0
-                    if gdata[u"calls"] > 0:
-                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
-                    else:
-                        vectors_call = 0.0
-                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
-                            int(gdata[u"suspends"]):
-                        threads[idx].append([
-                            gnode,
-                            int(gdata[u"calls"]),
-                            int(gdata[u"vectors"]),
-                            int(gdata[u"suspends"]),
-                            clocks,
-                            vectors_call
-                        ])
+                    threads[idx].append([
+                        gnode,
+                        int(gdata[u"calls"]),
+                        int(gdata[u"vectors"]),
+                        int(gdata[u"suspends"]),
+                        float(gdata[u"clocks"]),
+                        float(gdata[u"vectors"] / gdata[u"calls"]) \
+                            if gdata[u"calls"] else 0.0
+                    ])
 
             bold = ET.SubElement(tcol, u"b")
             bold.text = (
@@ -484,6 +572,22 @@ def _tpc_insert_data(target, src, include_tests):
                 target[u"data"].append(
                     float(u"nan") if lat == -1 else lat * 1e6
                 )
+        elif include_tests == u"hoststack":
+            try:
+                target[u"data"].append(
+                    float(src[u"result"][u"bits_per_second"])
+                )
+            except KeyError:
+                target[u"data"].append(
+                    (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
+                    ((float(src[u"result"][u"client"][u"time"]) +
+                      float(src[u"result"][u"server"][u"time"])) / 2)
+                )
+        elif include_tests == u"vsap":
+            try:
+                target[u"data"].append(src[u"result"][u"cps"])
+            except KeyError:
+                target[u"data"].append(src[u"result"][u"rps"])
     except (KeyError, TypeError):
         pass
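
When a hoststack result has no bits_per_second field, the branch above falls back to deriving goodput from the client's transmitted bytes and the average of the client and server run times. A standalone illustration of that formula; the result dict below is invented and only shows the fields used above:

    src = {
        "result": {
            "client": {"tx_data": 12_500_000, "time": 10.0},  # bytes, seconds
            "server": {"time": 10.2},
        }
    }

    client = src["result"]["client"]
    server = src["result"]["server"]
    # Bits sent, divided by the mean of client/server elapsed time -> bits/s.
    bps = (float(client["tx_data"]) * 8) / \
          ((float(client["time"]) + float(server["time"])) / 2)
    print(f"{bps / 1e6:.2f} Mbit/s")
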
 
@@ -880,7 +984,7 @@ def table_perf_trending_dash(table, input_data):
     header = [
         u"Test Case",
         u"Trend [Mpps]",
-        u"Short-Term Change [%]",
+        u"Runs [#]",
         u"Long-Term Change [%]",
         u"Regressions [#]",
         u"Progressions [#]"
@@ -942,6 +1046,13 @@ def table_perf_trending_dash(table, input_data):
         last_avg = avgs[-1]
         avg_week_ago = avgs[max(-win_size, -len(avgs))]
 
+        nr_of_last_avgs = 0
+        for x in reversed(avgs):
+            if x == last_avg:
+                nr_of_last_avgs += 1
+            else:
+                break
+
         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
             rel_change_last = nan
         else:
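
The loop added above counts how many trailing entries of avgs equal the last average, i.e. for how many runs the trend value has stayed unchanged. The same logic as a self-contained snippet with made-up trend data:

    avgs = [9.8, 10.1, 10.4, 10.4, 10.4]  # illustrative trend averages [Mpps]

    last_avg = avgs[-1]
    nr_of_last_avgs = 0
    for value in reversed(avgs):
        if value == last_avg:
            nr_of_last_avgs += 1
        else:
            break

    print(nr_of_last_avgs)  # 3: the trend value has not changed for 3 runs
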
@@ -963,28 +1074,23 @@ def table_perf_trending_dash(table, input_data):
             tbl_lst.append(
                 [tbl_dict[tst_name][u"name"],
                  round(last_avg / 1e6, 2),
-                 rel_change_last,
+                 nr_of_last_avgs,
                  rel_change_long,
                  classification_lst[-win_size+1:].count(u"regression"),
                  classification_lst[-win_size+1:].count(u"progression")])
 
     tbl_lst.sort(key=lambda rel: rel[0])
-    tbl_lst.sort(key=lambda rel: rel[3])
     tbl_lst.sort(key=lambda rel: rel[2])
-
-    tbl_sorted = list()
-    for nrr in range(table[u"window"], -1, -1):
-        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
-        for nrp in range(table[u"window"], -1, -1):
-            tbl_out = [item for item in tbl_reg if item[5] == nrp]
-            tbl_sorted.extend(tbl_out)
+    tbl_lst.sort(key=lambda rel: rel[3])
+    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
+    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
 
     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
 
     logging.info(f"    Writing file: {file_name}")
     with open(file_name, u"wt") as file_handler:
         file_handler.write(header_str)
-        for test in tbl_sorted:
+        for test in tbl_lst:
             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
 
     logging.info(f"    Writing file: {table[u'output-file']}.txt")
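
The new sorting above relies on list.sort() being stable: sorting repeatedly from the least significant key to the most significant one (name, runs, long-term change, then progressions and regressions descending) produces a multi-key ordering without building intermediate lists. A minimal demonstration of the idiom on trimmed-down rows:

    # Each row: (name, regressions, progressions).
    rows = [
        ("test-b", 1, 0),
        ("test-a", 2, 1),
        ("test-c", 2, 3),
    ]

    # Least significant key first; stability keeps the earlier ordering
    # among rows whose more significant keys compare equal.
    rows.sort(key=lambda row: row[0])                # name, ascending
    rows.sort(key=lambda row: row[2], reverse=True)  # progressions, descending
    rows.sort(key=lambda row: row[1], reverse=True)  # regressions, descending

    print(rows)  # [('test-c', 2, 3), ('test-a', 2, 1), ('test-b', 1, 0)]
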
@@ -1019,6 +1125,8 @@ def _generate_url(testbed, test_name):
         nic = u"x553"
     elif u"cx556" in test_name or u"cx556a" in test_name:
         nic = u"cx556a"
+    elif u"ena" in test_name:
+        nic = u"nitro50g"
     else:
         nic = u""
 
@@ -1051,15 +1159,21 @@ def _generate_url(testbed, test_name):
         cores = u"4t4c"
     elif u"2t1c" in test_name or \
          (u"-1c-" in test_name and
-          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+          testbed in
+          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+           u"2n-aws", u"3n-aws")):
         cores = u"2t1c"
     elif u"4t2c" in test_name or \
          (u"-2c-" in test_name and
-          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+          testbed in
+          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+           u"2n-aws", u"3n-aws")):
         cores = u"4t2c"
     elif u"8t4c" in test_name or \
          (u"-4c-" in test_name and
-          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+          testbed in
+          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+           u"2n-aws", u"3n-aws")):
         cores = u"8t4c"
     else:
         cores = u""
@@ -1070,10 +1184,14 @@ def _generate_url(testbed, test_name):
         driver = u"l3fwd"
     elif u"avf" in test_name:
         driver = u"avf"
+    elif u"af-xdp" in test_name or u"af_xdp" in test_name:
+        driver = u"af_xdp"
     elif u"rdma" in test_name:
         driver = u"rdma"
     elif u"dnv" in testbed or u"tsh" in testbed:
         driver = u"ixgbe"
+    elif u"ena" in test_name:
+        driver = u"ena"
     else:
         driver = u"dpdk"
 
@@ -1178,6 +1296,8 @@ def _generate_url(testbed, test_name):
             bsf += u"-sw"
         elif u"hw" in test_name:
             bsf += u"-hw"
+        elif u"spe" in test_name:
+            bsf += u"-spe"
     elif u"ethip4vxlan" in test_name:
         domain = u"ip4_tunnels"
     elif u"ethip4udpgeneve" in test_name:
@@ -1374,7 +1494,11 @@ def table_last_failed_tests(table, input_data):
                 if not groups:
                     continue
                 nic = groups.group(0)
-                failed_tests.append(f"{nic}-{tst_data[u'name']}")
+                msg = tst_data[u'msg'].replace(u"\n", u"")
+                msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
+                             'xxx.xxx.xxx.xxx', msg)
+                msg = msg.split(u'Also teardown failed')[0]
+                failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
             tbl_list.append(passed)
             tbl_list.append(failed)
             tbl_list.append(duration)
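
The failed-test message is sanitized above before it is appended: newlines are dropped, IPv4 addresses are masked and anything after "Also teardown failed" is cut off. A standalone sketch of that sanitization, with an invented sample message:

    import re

    msg = "Cannot reach 10.0.0.1 from 192.168.1.10\nAlso teardown failed: ..."

    msg = msg.replace("\n", "")
    msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                 'xxx.xxx.xxx.xxx', msg)
    msg = msg.split('Also teardown failed')[0]

    print(msg)  # Cannot reach xxx.xxx.xxx.xxx from xxx.xxx.xxx.xxx
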
@@ -1607,81 +1731,81 @@ def table_comparison(table, input_data):
     :type table: pandas.Series
     :type input_data: InputData
     """
-    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+    logging.info(f"  Generating the table {table.get('title', '')} ...")
 
     # Transform the data
     logging.info(
-        f"    Creating the data set for the {table.get(u'type', u'')} "
-        f"{table.get(u'title', u'')}."
+        f"    Creating the data set for the {table.get('type', '')} "
+        f"{table.get('title', '')}."
     )
 
-    columns = table.get(u"columns", None)
+    columns = table.get("columns", None)
     if not columns:
         logging.error(
-            f"No columns specified for {table.get(u'title', u'')}. Skipping."
+            f"No columns specified for {table.get('title', '')}. Skipping."
         )
         return
 
     cols = list()
     for idx, col in enumerate(columns):
-        if col.get(u"data-set", None) is None:
-            logging.warning(f"No data for column {col.get(u'title', u'')}")
+        if col.get("data-set", None) is None:
+            logging.warning(f"No data for column {col.get('title', '')}")
             continue
-        tag = col.get(u"tag", None)
+        tag = col.get("tag", None)
         data = input_data.filter_data(
             table,
             params=[
-                u"throughput",
-                u"result",
-                u"latency",
-                u"name",
-                u"parent",
-                u"tags"
+                "throughput",
+                "result",
+                "latency",
+                "name",
+                "parent",
+                "tags"
             ],
-            data=col[u"data-set"],
+            data=col["data-set"],
             continue_on_error=True
         )
         col_data = {
-            u"title": col.get(u"title", f"Column{idx}"),
-            u"data": dict()
+            "title": col.get("title", f"Column{idx}"),
+            "data": dict()
         }
         for builds in data.values:
             for build in builds:
                 for tst_name, tst_data in build.items():
-                    if tag and tag not in tst_data[u"tags"]:
+                    if tag and tag not in tst_data["tags"]:
                         continue
                     tst_name_mod = \
                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
-                        replace(u"2n1l-", u"")
-                    if col_data[u"data"].get(tst_name_mod, None) is None:
-                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
-                        if u"across testbeds" in table[u"title"].lower() or \
-                                u"across topologies" in table[u"title"].lower():
+                        replace("2n1l-", "")
+                    if col_data["data"].get(tst_name_mod, None) is None:
+                        name = tst_data['name'].rsplit('-', 1)[0]
+                        if "across testbeds" in table["title"].lower() or \
+                                "across topologies" in table["title"].lower():
                             name = _tpc_modify_displayed_test_name(name)
-                        col_data[u"data"][tst_name_mod] = {
-                            u"name": name,
-                            u"replace": True,
-                            u"data": list(),
-                            u"mean": None,
-                            u"stdev": None
+                        col_data["data"][tst_name_mod] = {
+                            "name": name,
+                            "replace": True,
+                            "data": list(),
+                            "mean": None,
+                            "stdev": None
                         }
                     _tpc_insert_data(
-                        target=col_data[u"data"][tst_name_mod],
+                        target=col_data["data"][tst_name_mod],
                         src=tst_data,
-                        include_tests=table[u"include-tests"]
+                        include_tests=table["include-tests"]
                     )
 
-        replacement = col.get(u"data-replacement", None)
+        replacement = col.get("data-replacement", None)
         if replacement:
             rpl_data = input_data.filter_data(
                 table,
                 params=[
-                    u"throughput",
-                    u"result",
-                    u"latency",
-                    u"name",
-                    u"parent",
-                    u"tags"
+                    "throughput",
+                    "result",
+                    "latency",
+                    "name",
+                    "parent",
+                    "tags"
                 ],
                 data=replacement,
                 continue_on_error=True
@@ -1689,56 +1813,56 @@ def table_comparison(table, input_data):
             for builds in rpl_data.values:
                 for build in builds:
                     for tst_name, tst_data in build.items():
-                        if tag and tag not in tst_data[u"tags"]:
+                        if tag and tag not in tst_data["tags"]:
                             continue
                         tst_name_mod = \
                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
-                            replace(u"2n1l-", u"")
-                        if col_data[u"data"].get(tst_name_mod, None) is None:
-                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
-                            if u"across testbeds" in table[u"title"].lower() \
-                                    or u"across topologies" in \
-                                    table[u"title"].lower():
+                            replace("2n1l-", "")
+                        if col_data["data"].get(tst_name_mod, None) is None:
+                            name = tst_data['name'].rsplit('-', 1)[0]
+                            if "across testbeds" in table["title"].lower() \
+                                    or "across topologies" in \
+                                    table["title"].lower():
                                 name = _tpc_modify_displayed_test_name(name)
-                            col_data[u"data"][tst_name_mod] = {
-                                u"name": name,
-                                u"replace": False,
-                                u"data": list(),
-                                u"mean": None,
-                                u"stdev": None
+                            col_data["data"][tst_name_mod] = {
+                                "name": name,
+                                "replace": False,
+                                "data": list(),
+                                "mean": None,
+                                "stdev": None
                             }
                             }
-                        if col_data[u"data"][tst_name_mod][u"replace"]:
-                            col_data[u"data"][tst_name_mod][u"replace"] = False
-                            col_data[u"data"][tst_name_mod][u"data"] = list()
+                        if col_data["data"][tst_name_mod]["replace"]:
+                            col_data["data"][tst_name_mod]["replace"] = False
+                            col_data["data"][tst_name_mod]["data"] = list()
                         _tpc_insert_data(
                         _tpc_insert_data(
-                            target=col_data[u"data"][tst_name_mod],
+                            target=col_data["data"][tst_name_mod],
                             src=tst_data,
                             src=tst_data,
-                            include_tests=table[u"include-tests"]
+                            include_tests=table["include-tests"]
                         )
 
                         )
 
-        if table[u"include-tests"] in (u"NDR", u"PDR") or \
-                u"latency" in table[u"include-tests"]:
-            for tst_name, tst_data in col_data[u"data"].items():
-                if tst_data[u"data"]:
-                    tst_data[u"mean"] = mean(tst_data[u"data"])
-                    tst_data[u"stdev"] = stdev(tst_data[u"data"])
+        if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
+                or "latency" in table["include-tests"]:
+            for tst_name, tst_data in col_data["data"].items():
+                if tst_data["data"]:
+                    tst_data["mean"] = mean(tst_data["data"])
+                    tst_data["stdev"] = stdev(tst_data["data"])
 
         cols.append(col_data)
 
     tbl_dict = dict()
     for col in cols:
-        for tst_name, tst_data in col[u"data"].items():
+        for tst_name, tst_data in col["data"].items():
             if tbl_dict.get(tst_name, None) is None:
                 tbl_dict[tst_name] = {
-                    "name": tst_data[u"name"]
+                    "name": tst_data["name"]
                 }
-            tbl_dict[tst_name][col[u"title"]] = {
-                u"mean": tst_data[u"mean"],
-                u"stdev": tst_data[u"stdev"]
+            tbl_dict[tst_name][col["title"]] = {
+                "mean": tst_data["mean"],
+                "stdev": tst_data["stdev"]
             }
 
     if not tbl_dict:
-        logging.warning(f"No data for table {table.get(u'title', u'')}!")
+        logging.warning(f"No data for table {table.get('title', '')}!")
         return
 
     tbl_lst = list()
@@ -1748,15 +1872,15 @@ def table_comparison(table, input_data):
             row.append(tst_data.get(col[u"title"], None))
         tbl_lst.append(row)
 
-    comparisons = table.get(u"comparisons", None)
+    comparisons = table.get("comparisons", None)
     rcas = list()
     if comparisons and isinstance(comparisons, list):
         for idx, comp in enumerate(comparisons):
             try:
-                col_ref = int(comp[u"reference"])
-                col_cmp = int(comp[u"compare"])
+                col_ref = int(comp["reference"])
+                col_cmp = int(comp["compare"])
             except KeyError:
-                logging.warning(u"Comparison: No references defined! Skipping.")
+                logging.warning("Comparison: No references defined! Skipping.")
                 comparisons.pop(idx)
                 continue
             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
@@ -1765,14 +1889,14 @@ def table_comparison(table, input_data):
                                 f"and/or compare={col_cmp}. Skipping.")
                 comparisons.pop(idx)
                 continue
-            rca_file_name = comp.get(u"rca-file", None)
+            rca_file_name = comp.get("rca-file", None)
             if rca_file_name:
                 try:
-                    with open(rca_file_name, u"r") as file_handler:
+                    with open(rca_file_name, "r") as file_handler:
                         rcas.append(
                             {
-                                u"title": f"RCA{idx + 1}",
-                                u"data": load(file_handler, Loader=FullLoader)
+                                "title": f"RCA{idx + 1}",
+                                "data": load(file_handler, Loader=FullLoader)
                             }
                         )
                 except (YAMLError, IOError) as err:
@@ -1792,28 +1916,38 @@ def table_comparison(table, input_data):
         for row in tbl_lst:
             new_row = deepcopy(row)
             for comp in comparisons:
-                ref_itm = row[int(comp[u"reference"])]
+                ref_itm = row[int(comp["reference"])]
                 if ref_itm is None and \
-                        comp.get(u"reference-alt", None) is not None:
-                    ref_itm = row[int(comp[u"reference-alt"])]
+                        comp.get("reference-alt", None) is not None:
+                    ref_itm = row[int(comp["reference-alt"])]
                 cmp_itm = row[int(comp[u"compare"])]
                 if ref_itm is not None and cmp_itm is not None and \
-                        ref_itm[u"mean"] is not None and \
-                        cmp_itm[u"mean"] is not None and \
-                        ref_itm[u"stdev"] is not None and \
-                        cmp_itm[u"stdev"] is not None:
+                        ref_itm["mean"] is not None and \
+                        cmp_itm["mean"] is not None and \
+                        ref_itm["stdev"] is not None and \
+                        cmp_itm["stdev"] is not None:
+                    norm_factor_ref = table["norm_factor"].get(
+                        comp.get("norm-ref", ""),
+                        1.0
+                    )
+                    norm_factor_cmp = table["norm_factor"].get(
+                        comp.get("norm-cmp", ""),
+                        1.0
+                    )
                     try:
                         delta, d_stdev = relative_change_stdev(
-                            ref_itm[u"mean"], cmp_itm[u"mean"],
-                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
+                            ref_itm["mean"] * norm_factor_ref,
+                            cmp_itm["mean"] * norm_factor_cmp,
+                            ref_itm["stdev"] * norm_factor_ref,
+                            cmp_itm["stdev"] * norm_factor_cmp
                         )
                     except ZeroDivisionError:
                         break
-                    if delta in (None, float(u"nan"), u"nan", u"NaN"):
+                    if delta is None or math.isnan(delta):
                         break
                     new_row.append({
-                        u"mean": delta * 1e6,
-                        u"stdev": d_stdev * 1e6
+                        "mean": delta * 1e6,
+                        "stdev": d_stdev * 1e6
                     })
                 else:
                     break
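
The comparison above now scales each column's mean and stdev by its testbed normalization factor (NORM_FREQ divided by the testbed frequency, prepared in generate_tables()) before computing the relative change. A hedged sketch of that calculation; relative_change_stdev is a pal_utils helper, so a simplified stand-in using the usual error propagation is shown here instead:

    import math

    def relative_change_stdev(m_ref, m_cmp, s_ref, s_cmp):
        # Simplified stand-in, not the exact pal_utils formula: relative
        # difference in percent plus a stdev propagated from both inputs.
        quotient = m_cmp / m_ref
        delta = (quotient - 1.0) * 100.0
        d_stdev = quotient * math.sqrt(
            (s_ref / m_ref) ** 2 + (s_cmp / m_cmp) ** 2) * 100.0
        return delta, d_stdev

    # Invented means/stdevs [pps]; normalization factors are 2.0 GHz / f.
    ref = {"mean": 12.0e6, "stdev": 0.2e6}
    cmp_ = {"mean": 13.0e6, "stdev": 0.3e6}
    norm_ref, norm_cmp = 2.0 / 2.3, 2.0 / 2.6

    delta, d_stdev = relative_change_stdev(
        ref["mean"] * norm_ref, cmp_["mean"] * norm_cmp,
        ref["stdev"] * norm_ref, cmp_["stdev"] * norm_cmp
    )
    print(f"{delta:+.2f} % +- {d_stdev:.2f} %")
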
@@ -1822,7 +1956,7 @@ def table_comparison(table, input_data):
 
     try:
         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
-        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
+        tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
     except TypeError as err:
         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
 
@@ -1831,62 +1965,62 @@ def table_comparison(table, input_data):
         row = [line[0], ]
         for idx, itm in enumerate(line[1:]):
             if itm is None or not isinstance(itm, dict) or\
-                    itm.get(u'mean', None) is None or \
-                    itm.get(u'stdev', None) is None:
-                row.append(u"NT")
-                row.append(u"NT")
+                    itm.get('mean', None) is None or \
+                    itm.get('stdev', None) is None:
+                row.append("NT")
+                row.append("NT")
             else:
-                row.append(round(float(itm[u'mean']) / 1e6, 3))
-                row.append(round(float(itm[u'stdev']) / 1e6, 3))
+                row.append(round(float(itm['mean']) / 1e6, 3))
+                row.append(round(float(itm['stdev']) / 1e6, 3))
         for rca in rcas:
             if rca is None:
                 continue
-            rca_nr = rca[u"data"].get(row[0], u"-")
-            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+            rca_nr = rca["data"].get(row[0], "-")
+            row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
         tbl_for_csv.append(row)
 
-    header_csv = [u"Test Case", ]
+    header_csv = ["Test Case", ]
     for col in cols:
-        header_csv.append(f"Avg({col[u'title']})")
-        header_csv.append(f"Stdev({col[u'title']})")
+        header_csv.append(f"Avg({col['title']})")
+        header_csv.append(f"Stdev({col['title']})")
     for comp in comparisons:
         header_csv.append(
-            f"Avg({comp.get(u'title', u'')})"
+            f"Avg({comp.get('title', '')})"
         )
         header_csv.append(
-            f"Stdev({comp.get(u'title', u'')})"
+            f"Stdev({comp.get('title', '')})"
         )
     for rca in rcas:
         if rca:
-            header_csv.append(rca[u"title"])
+            header_csv.append(rca["title"])

-    legend_lst = table.get(u"legend", None)
+    legend_lst = table.get("legend", None)
     if legend_lst is None:
-        legend = u""
+        legend = ""
     else:
-        legend = u"\n" + u"\n".join(legend_lst) + u"\n"
+        legend = "\n" + "\n".join(legend_lst) + "\n"

-    footnote = u""
+    footnote = ""
     if rcas and any(rcas):
-        footnote += u"\nRoot Cause Analysis:\n"
+        footnote += "\nRoot Cause Analysis:\n"
         for rca in rcas:
             if rca:
-                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
+                footnote += f"{rca['data'].get('footnote', '')}\n"

-    csv_file_name = f"{table[u'output-file']}-csv.csv"
-    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
+    csv_file_name = f"{table['output-file']}-csv.csv"
+    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
         file_handler.write(
-            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
+            ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
         )
         for test in tbl_for_csv:
             file_handler.write(
-                u",".join([f'"{item}"' for item in test]) + u"\n"
+                ",".join([f'"{item}"' for item in test]) + "\n"
             )
         if legend_lst:
             for item in legend_lst:
                 file_handler.write(f'"{item}"\n')
         if footnote:
-            for itm in footnote.split(u"\n"):
+            for itm in footnote.split("\n"):
                 file_handler.write(f'"{itm}"\n')
 
     tbl_tmp = list()
@@ -1895,77 +2029,77 @@ def table_comparison(table, input_data):
         row = [line[0], ]
         for idx, itm in enumerate(line[1:]):
             if itm is None or not isinstance(itm, dict) or \
-                    itm.get(u'mean', None) is None or \
-                    itm.get(u'stdev', None) is None:
-                new_itm = u"NT"
+                    itm.get('mean', None) is None or \
+                    itm.get('stdev', None) is None:
+                new_itm = "NT"
             else:
                 if idx < len(cols):
                     new_itm = (
-                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
-                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
-                        replace(u"nan", u"NaN")
+                        f"{round(float(itm['mean']) / 1e6, 2)} "
+                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
+                        replace("nan", "NaN")
                     )
                 else:
                     new_itm = (
-                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
-                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
-                        replace(u"nan", u"NaN")
+                        f"{round(float(itm['mean']) / 1e6, 2):+} "
+                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
+                        replace("nan", "NaN")
                     )
-            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
-                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
+            if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
+                max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
             row.append(new_itm)
 
         tbl_tmp.append(row)
 
-    header = [u"Test Case", ]
-    header.extend([col[u"title"] for col in cols])
-    header.extend([comp.get(u"title", u"") for comp in comparisons])
+    header = ["Test Case", ]
+    header.extend([col["title"] for col in cols])
+    header.extend([comp.get("title", "") for comp in comparisons])
 
     tbl_final = list()
     for line in tbl_tmp:
         row = [line[0], ]
         for idx, itm in enumerate(line[1:]):
-            if itm in (u"NT", u"NaN"):
+            if itm in ("NT", "NaN"):
                 row.append(itm)
                 continue
-            itm_lst = itm.rsplit(u"\u00B1", 1)
+            itm_lst = itm.rsplit("\u00B1", 1)
             itm_lst[-1] = \
-                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
-            itm_str = u"\u00B1".join(itm_lst)
+                f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
+            itm_str = "\u00B1".join(itm_lst)
 
             if idx >= len(cols):
                 # Diffs
                 rca = rcas[idx - len(cols)]
                 if rca:
                     # Add rcas to diffs
-                    rca_nr = rca[u"data"].get(row[0], None)
+                    rca_nr = rca["data"].get(row[0], None)
                     if rca_nr:
                         hdr_len = len(header[idx + 1]) - 1
                         if hdr_len < 19:
                             hdr_len = 19
                         rca_nr = f"[{rca_nr}]"
                         itm_str = (
-                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
-                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
+                            f"{' ' * (4 - len(rca_nr))}{rca_nr}"
+                            f"{' ' * (hdr_len - 4 - len(itm_str))}"
                             f"{itm_str}"
                         )
             row.append(itm_str)
         tbl_final.append(row)
 
     # Generate csv tables:
-    csv_file_name = f"{table[u'output-file']}.csv"
+    csv_file_name = f"{table['output-file']}.csv"
     logging.info(f"    Writing the file {csv_file_name}")
-    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
-        file_handler.write(u";".join(header) + u"\n")
+    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
+        file_handler.write(";".join(header) + "\n")
         for test in tbl_final:
-            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
+            file_handler.write(";".join([str(item) for item in test]) + "\n")
 
     # Generate txt table:
-    txt_file_name = f"{table[u'output-file']}.txt"
+    txt_file_name = f"{table['output-file']}.txt"
     logging.info(f"    Writing the file {txt_file_name}")
-    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
+    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")

-    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
+    with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
         file_handler.write(legend)
         file_handler.write(footnote)
 
@@ -1973,11 +2107,11 @@ def table_comparison(table, input_data):
     _tpc_generate_html_table(
         header,
         tbl_final,
-        table[u'output-file'],
+        table['output-file'],
         legend=legend,
         footnote=footnote,
         sort_data=False,
-        title=table.get(u"title", u"")
+        title=table.get("title", "")
     )
 
 
@@ -2131,7 +2265,19 @@ def table_weekly_comparison(table, in_data):
 
     txt_file_name = f"{table[u'output-file']}.txt"
     logging.info(f"    Writing the file {txt_file_name}")
-    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
+    try:
+        convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
+    except Exception as err:
+        logging.error(repr(err))
+        for hdr in header:
+            logging.info(",".join(hdr))
+        for test in tbl_lst:
+            logging.info(",".join(
+                [
+                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
+                    replace(u"null", u"-") for item in test
+                ]
+            ))
 
     # Reorganize header in txt table
     txt_table = list()
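
The try/except added around convert_csv_to_pretty_txt() is the debug output the commit title refers to: if the conversion fails, the header and the table rows are dumped to the log (with None/nan/null rendered as "-") instead of aborting the whole report run. A generic sketch of the pattern, using placeholder data and a deliberately failing stand-in for the converter:

    import logging

    def convert_csv_to_pretty_txt(csv_name, txt_name, delimiter=","):
        # Placeholder for the real PAL helper; fails on purpose here.
        raise ValueError("example failure")

    header = [["Test Case", "Trend [Mpps]"]]
    tbl_lst = [["ip4scale20k", None], ["l2bdscale1mmaclrn", float("nan")]]

    try:
        convert_csv_to_pretty_txt("table.csv", "table.txt", delimiter=",")
    except Exception as err:
        # Keep the run going, but leave enough in the log to debug the table.
        logging.error(repr(err))
        for hdr in header:
            logging.info(",".join(hdr))
        for test in tbl_lst:
            logging.info(",".join(
                str(item).replace("None", "-").replace("nan", "-")
                for item in test
            ))
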