PAL: Add debug output to table_weekly_comparison
[csit.git] / resources / tools / presentation / generator_tables.py
index a72551f..82e59c4 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -17,6 +17,7 @@
 
 import logging
 import csv
+import math
 import re
 
 from collections import OrderedDict
@@ -28,6 +29,7 @@ from copy import deepcopy
 import plotly.graph_objects as go
 import plotly.offline as ploff
 import pandas as pd
+import prettytable
 
 from numpy import nan, isnan
 from yaml import load, FullLoader, YAMLError
@@ -38,6 +40,8 @@ from pal_utils import mean, stdev, classify_anomalies, \
 
 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
 
+NORM_FREQ = 2.0  # [GHz]
+
 
 def generate_tables(spec, data):
     """Generate all tables specified in the specification file.
@@ -49,30 +53,128 @@ def generate_tables(spec, data):
     """
 
     generator = {
-        u"table_merged_details": table_merged_details,
-        u"table_soak_vs_ndr": table_soak_vs_ndr,
-        u"table_perf_trending_dash": table_perf_trending_dash,
-        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
-        u"table_last_failed_tests": table_last_failed_tests,
-        u"table_failed_tests": table_failed_tests,
-        u"table_failed_tests_html": table_failed_tests_html,
-        u"table_oper_data_html": table_oper_data_html,
-        u"table_comparison": table_comparison,
-        u"table_weekly_comparison": table_weekly_comparison
+        "table_merged_details": table_merged_details,
+        "table_soak_vs_ndr": table_soak_vs_ndr,
+        "table_perf_trending_dash": table_perf_trending_dash,
+        "table_perf_trending_dash_html": table_perf_trending_dash_html,
+        "table_last_failed_tests": table_last_failed_tests,
+        "table_failed_tests": table_failed_tests,
+        "table_failed_tests_html": table_failed_tests_html,
+        "table_oper_data_html": table_oper_data_html,
+        "table_comparison": table_comparison,
+        "table_weekly_comparison": table_weekly_comparison,
+        "table_job_spec_duration": table_job_spec_duration
     }
 
     logging.info(u"Generating the tables ...")
+
+    norm_factor = dict()
+    for key, val in spec.environment.get("frequency", dict()).items():
+        norm_factor[key] = NORM_FREQ / val
+
     for table in spec.tables:
         try:
-            if table[u"algorithm"] == u"table_weekly_comparison":
-                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
-            generator[table[u"algorithm"]](table, data)
+            if table["algorithm"] == "table_weekly_comparison":
+                table["testbeds"] = spec.environment.get("testbeds", None)
+            if table["algorithm"] == "table_comparison":
+                table["norm_factor"] = norm_factor
+            generator[table["algorithm"]](table, data)
         except NameError as err:
             logging.error(
-                f"Probably algorithm {table[u'algorithm']} is not defined: "
+                f"Probably algorithm {table['algorithm']} is not defined: "
                 f"{repr(err)}"
             )
-    logging.info(u"Done.")
+    logging.info("Done.")
+
+
+def table_job_spec_duration(table, input_data):
+    """Generate the table(s) with algorithm: table_job_spec_duration
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    # input_data is used below via input_data.metadata(); no discard needed.
+
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
+    jb_type = table.get(u"jb-type", None)
+
+    tbl_lst = list()
+    if jb_type == u"iterative":
+        for line in table.get(u"lines", tuple()):
+            tbl_itm = {
+                u"name": line.get(u"job-spec", u""),
+                u"data": list()
+            }
+            for job, builds in line.get(u"data-set", dict()).items():
+                for build_nr in builds:
+                    try:
+                        minutes = input_data.metadata(
+                            job, str(build_nr)
+                        )[u"elapsedtime"] // 60000
+                    except (KeyError, IndexError, ValueError, AttributeError):
+                        continue
+                    tbl_itm[u"data"].append(minutes)
+            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
+            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
+            tbl_lst.append(tbl_itm)
+    elif jb_type == u"coverage":
+        job = table.get(u"data", None)
+        if not job:
+            return
+        for line in table.get(u"lines", tuple()):
+            try:
+                tbl_itm = {
+                    u"name": line.get(u"job-spec", u""),
+                    u"mean": input_data.metadata(
+                        list(job.keys())[0], str(line[u"build"])
+                    )[u"elapsedtime"] // 60000,
+                    u"stdev": float(u"nan")
+                }
+                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
+            except (KeyError, IndexError, ValueError, AttributeError):
+                continue
+            tbl_lst.append(tbl_itm)
+    else:
+        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
+        return
+
+    for line in tbl_lst:
+        line[u"mean"] = \
+            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
+        if math.isnan(line[u"stdev"]):
+            line[u"stdev"] = u""
+        else:
+            line[u"stdev"] = \
+                f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
+
+    if not tbl_lst:
+        return
+
+    rows = list()
+    for itm in tbl_lst:
+        rows.append([
+            itm[u"name"],
+            f"{len(itm[u'data'])}",
+            f"{itm[u'mean']} +- {itm[u'stdev']}"
+            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
+        ])
+
+    txt_table = prettytable.PrettyTable(
+        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
+    )
+    for row in rows:
+        txt_table.add_row(row)
+    txt_table.align = u"r"
+    txt_table.align[u"Job Specification"] = u"l"
+
+    file_name = f"{table.get(u'output-file', u'')}.txt"
+    with open(file_name, u"wt", encoding='utf-8') as txt_file:
+        txt_file.write(str(txt_table))
 
 
 def table_oper_data_html(table, input_data):
@@ -93,7 +195,7 @@ def table_oper_data_html(table, input_data):
     )
     data = input_data.filter_data(
         table,
-        params=[u"name", u"parent", u"show-run", u"type"],
+        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
         continue_on_error=True
     )
     if data.empty:
@@ -146,7 +248,8 @@ def table_oper_data_html(table, input_data):
         )
         thead.text = u"\t"
 
-        if tst_data.get(u"show-run", u"No Data") == u"No Data":
+        if tst_data.get(u"telemetry-show-run", None) is None or \
+                isinstance(tst_data[u"telemetry-show-run"], str):
             trow = ET.SubElement(
                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
             )
@@ -176,17 +279,43 @@ def table_oper_data_html(table, input_data):
             u"Average Vector Size"
         )
 
-        for dut_data in tst_data[u"show-run"].values():
+        for dut_data in tst_data[u"telemetry-show-run"].values():
             trow = ET.SubElement(
                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
             )
             tcol = ET.SubElement(
                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
             )
-            if dut_data.get(u"threads", None) is None:
+            if dut_data.get(u"runtime", None) is None:
                 tcol.text = u"No Data"
                 continue
 
+            runtime = dict()
+            for item in dut_data[u"runtime"].get(u"data", tuple()):
+                tid = int(item[u"labels"][u"thread_id"])
+                if runtime.get(tid, None) is None:
+                    runtime[tid] = dict()
+                gnode = item[u"labels"][u"graph_node"]
+                if runtime[tid].get(gnode, None) is None:
+                    runtime[tid][gnode] = dict()
+                try:
+                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
+                except ValueError:
+                    runtime[tid][gnode][item[u"name"]] = item[u"value"]
+
+            threads = {tid: list() for tid in runtime}
+            for idx, run_data in runtime.items():
+                for gnode, gdata in run_data.items():
+                    threads[idx].append([
+                        gnode,
+                        int(gdata[u"calls"]),
+                        int(gdata[u"vectors"]),
+                        int(gdata[u"suspends"]),
+                        float(gdata[u"clocks"]),
+                        float(gdata[u"vectors"] / gdata[u"calls"]) \
+                            if gdata[u"calls"] else 0.0
+                    ])
+
             bold = ET.SubElement(tcol, u"b")
             bold.text = (
                 f"Host IP: {dut_data.get(u'host', '')}, "
@@ -200,7 +329,7 @@ def table_oper_data_html(table, input_data):
             )
             thead.text = u"\t"
 
-            for thread_nr, thread in dut_data[u"threads"].items():
+            for thread_nr, thread in threads.items():
                 trow = ET.SubElement(
                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                 )
@@ -323,7 +452,8 @@ def table_merged_details(table, input_data):
         suite_name = suite[u"name"]
         table_lst = list()
         for test in data.keys():
-            if data[test][u"parent"] not in suite_name:
+            if data[test][u"status"] != u"PASS" or \
+                    data[test][u"parent"] not in suite_name:
                 continue
             row_lst = list()
             for column in table[u"columns"]:
@@ -348,13 +478,14 @@ def table_merged_details(table, input_data):
                         # Temporary solution: remove NDR results from message:
                         if bool(table.get(u'remove-ndr', False)):
                             try:
-                                col_data = col_data.split(u" |br| ", 1)[1]
+                                col_data = col_data.split(u"\n", 1)[1]
                             except IndexError:
                                 pass
+                        col_data = col_data.replace(u'\n', u' |br| ').\
+                            replace(u'\r', u'').replace(u'"', u"'")
                         col_data = f" |prein| {col_data} |preout| "
-                    elif column[u"data"].split(u" ")[1] in \
-                            (u"conf-history", u"show-run"):
-                        col_data = col_data.replace(u" |br| ", u"", 1)
+                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
+                        col_data = col_data.replace(u'\n', u' |br| ')
                         col_data = f" |prein| {col_data[:-5]} |preout| "
                     row_lst.append(f'"{col_data}"')
                 except KeyError:
@@ -386,12 +517,7 @@ def _tpc_modify_test_name(test_name, ignore_nic=False):
     :rtype: str
     """
     test_name_mod = test_name.\
-        replace(u"-ndrpdrdisc", u""). \
         replace(u"-ndrpdr", u"").\
-        replace(u"-pdrdisc", u""). \
-        replace(u"-ndrdisc", u"").\
-        replace(u"-pdr", u""). \
-        replace(u"-ndr", u""). \
         replace(u"1t1c", u"1c").\
         replace(u"2t1c", u"1c"). \
         replace(u"2t2c", u"2c").\
@@ -425,7 +551,7 @@ def _tpc_insert_data(target, src, include_tests):
     """Insert src data to the target structure.
 
     :param target: Target structure where the data is placed.
-    :param src: Source data to be placed into the target stucture.
+    :param src: Source data to be placed into the target structure.
     :param include_tests: Which results will be included (MRR, NDR, PDR).
     :type target: list
     :type src: dict
@@ -439,6 +565,29 @@ def _tpc_insert_data(target, src, include_tests):
             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
         elif include_tests == u"NDR":
             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
+        elif u"latency" in include_tests:
+            keys = include_tests.split(u"-")
+            if len(keys) == 4:
+                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
+                target[u"data"].append(
+                    float(u"nan") if lat == -1 else lat * 1e6
+                )
+        elif include_tests == u"hoststack":
+            try:
+                target[u"data"].append(
+                    float(src[u"result"][u"bits_per_second"])
+                )
+            except KeyError:
+                target[u"data"].append(
+                    (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
+                    ((float(src[u"result"][u"client"][u"time"]) +
+                      float(src[u"result"][u"server"][u"time"])) / 2)
+                )
+        elif include_tests == u"vsap":
+            try:
+                target[u"data"].append(src[u"result"][u"cps"])
+            except KeyError:
+                target[u"data"].append(src[u"result"][u"rps"])
     except (KeyError, TypeError):
         pass
 
@@ -594,6 +743,7 @@ def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
         path = u"_tmp/src/vpp_performance_tests/comparisons/"
     else:
         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
+    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
     with open(f"{path}{file_name}.rst", u"wt") as rst_file:
         rst_file.write(
             u"\n"
@@ -611,10 +761,23 @@ def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
             f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
             f'</iframe>\n\n'
         )
+
         if legend:
-            rst_file.write(legend[1:].replace(u"\n", u" |br| "))
+            try:
+                itm_lst = legend[1:-2].split(u"\n")
+                rst_file.write(
+                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
+                )
+            except IndexError as err:
+                logging.error(f"Legend cannot be written to html file\n{err}")
         if footnote:
-            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
+            try:
+                itm_lst = footnote[1:].split(u"\n")
+                rst_file.write(
+                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
+                )
+            except IndexError as err:
+                logging.error(f"Footnote cannot be written to html file\n{err}")
 
 
 def table_soak_vs_ndr(table, input_data):
@@ -667,8 +830,7 @@ def table_soak_vs_ndr(table, input_data):
             f"Percentage change calculated for mean values.\n"
             u"Stdev(Diff): "
             u"Standard deviation of percentage change calculated for mean "
-            u"values.\n"
-            u":END"
+            u"values."
         )
     except (AttributeError, KeyError) as err:
         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
@@ -776,17 +938,17 @@ def table_soak_vs_ndr(table, input_data):
     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
 
     # Generate csv tables:
-    csv_file = f"{table[u'output-file']}.csv"
-    with open(csv_file, u"wt") as file_handler:
+    csv_file_name = f"{table[u'output-file']}.csv"
+    with open(csv_file_name, u"wt") as file_handler:
         file_handler.write(header_str)
         for test in tbl_lst:
             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
 
     convert_csv_to_pretty_txt(
-        csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
+        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
     )
-    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
-        txt_file.write(legend)
+    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
+        file_handler.write(legend)
 
     # Generate html table:
     _tpc_generate_html_table(
@@ -822,13 +984,15 @@ def table_perf_trending_dash(table, input_data):
     header = [
         u"Test Case",
         u"Trend [Mpps]",
-        u"Short-Term Change [%]",
+        u"Runs [#]",
         u"Long-Term Change [%]",
         u"Regressions [#]",
         u"Progressions [#]"
     ]
     header_str = u",".join(header) + u"\n"
 
+    incl_tests = table.get(u"include-tests", u"MRR")
+
     # Prepare data to the table:
     tbl_dict = dict()
     for job, builds in table[u"data"].items():
@@ -846,8 +1010,15 @@ def table_perf_trending_dash(table, input_data):
                         u"data": OrderedDict()
                     }
                 try:
-                    tbl_dict[tst_name][u"data"][str(build)] = \
-                        tst_data[u"result"][u"receive-rate"]
+                    if incl_tests == u"MRR":
+                        tbl_dict[tst_name][u"data"][str(build)] = \
+                            tst_data[u"result"][u"receive-rate"]
+                    elif incl_tests == u"NDR":
+                        tbl_dict[tst_name][u"data"][str(build)] = \
+                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
+                    elif incl_tests == u"PDR":
+                        tbl_dict[tst_name][u"data"][str(build)] = \
+                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                 except (TypeError, KeyError):
                     pass  # No data in output.xml for this test
 
@@ -857,7 +1028,11 @@ def table_perf_trending_dash(table, input_data):
         if len(data_t) < 2:
             continue
 
-        classification_lst, avgs = classify_anomalies(data_t)
+        try:
+            classification_lst, avgs, _ = classify_anomalies(data_t)
+        except ValueError as err:
+            logging.info(f"{err} Skipping")
+            continue
 
         win_size = min(len(data_t), table[u"window"])
         long_win_size = min(len(data_t), table[u"long-trend-window"])
@@ -871,6 +1046,13 @@ def table_perf_trending_dash(table, input_data):
         last_avg = avgs[-1]
         avg_week_ago = avgs[max(-win_size, -len(avgs))]
 
+        nr_of_last_avgs = 0
+        for x in reversed(avgs):
+            if x == last_avg:
+                nr_of_last_avgs += 1
+            else:
+                break
+
         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
             rel_change_last = nan
         else:
@@ -892,27 +1074,23 @@ def table_perf_trending_dash(table, input_data):
             tbl_lst.append(
                 [tbl_dict[tst_name][u"name"],
                  round(last_avg / 1e6, 2),
-                 rel_change_last,
+                 nr_of_last_avgs,
                  rel_change_long,
-                 classification_lst[-win_size:].count(u"regression"),
-                 classification_lst[-win_size:].count(u"progression")])
+                 classification_lst[-win_size+1:].count(u"regression"),
+                 classification_lst[-win_size+1:].count(u"progression")])
 
     tbl_lst.sort(key=lambda rel: rel[0])
-
-    tbl_sorted = list()
-    for nrr in range(table[u"window"], -1, -1):
-        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
-        for nrp in range(table[u"window"], -1, -1):
-            tbl_out = [item for item in tbl_reg if item[5] == nrp]
-            tbl_out.sort(key=lambda rel: rel[2])
-            tbl_sorted.extend(tbl_out)
+    tbl_lst.sort(key=lambda rel: rel[2])
+    tbl_lst.sort(key=lambda rel: rel[3])
+    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
+    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
 
     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
 
     logging.info(f"    Writing file: {file_name}")
     with open(file_name, u"wt") as file_handler:
         file_handler.write(header_str)
-        for test in tbl_sorted:
+        for test in tbl_lst:
             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
 
     logging.info(f"    Writing file: {table[u'output-file']}.txt")
@@ -947,6 +1125,8 @@ def _generate_url(testbed, test_name):
         nic = u"x553"
     elif u"cx556" in test_name or u"cx556a" in test_name:
         nic = u"cx556a"
+    elif u"ena" in test_name:
+        nic = u"nitro50g"
     else:
         nic = u""
 
@@ -967,27 +1147,33 @@ def _generate_url(testbed, test_name):
 
     if u"1t1c" in test_name or \
         (u"-1c-" in test_name and
-         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"1t1c"
     elif u"2t2c" in test_name or \
          (u"-2c-" in test_name and
-          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"2t2c"
     elif u"4t4c" in test_name or \
          (u"-4c-" in test_name and
-          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"4t4c"
     elif u"2t1c" in test_name or \
          (u"-1c-" in test_name and
-          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+          testbed in
+          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+           u"2n-aws", u"3n-aws")):
         cores = u"2t1c"
     elif u"4t2c" in test_name or \
          (u"-2c-" in test_name and
-          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+          testbed in
+          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+           u"2n-aws", u"3n-aws")):
         cores = u"4t2c"
     elif u"8t4c" in test_name or \
          (u"-4c-" in test_name and
-          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+          testbed in
+          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+           u"2n-aws", u"3n-aws")):
         cores = u"8t4c"
     else:
         cores = u""
@@ -998,18 +1184,64 @@ def _generate_url(testbed, test_name):
         driver = u"l3fwd"
     elif u"avf" in test_name:
         driver = u"avf"
+    elif u"af-xdp" in test_name or u"af_xdp" in test_name:
+        driver = u"af_xdp"
     elif u"rdma" in test_name:
         driver = u"rdma"
     elif u"dnv" in testbed or u"tsh" in testbed:
         driver = u"ixgbe"
+    elif u"ena" in test_name:
+        driver = u"ena"
     else:
         driver = u"dpdk"
 
-    if u"acl" in test_name or \
-            u"macip" in test_name or \
-            u"nat" in test_name or \
-            u"policer" in test_name or \
-            u"cop" in test_name:
+    if u"macip-iacl1s" in test_name:
+        bsf = u"features-macip-iacl1"
+    elif u"macip-iacl10s" in test_name:
+        bsf = u"features-macip-iacl10"
+    elif u"macip-iacl50s" in test_name:
+        bsf = u"features-macip-iacl50"
+    elif u"iacl1s" in test_name:
+        bsf = u"features-iacl1"
+    elif u"iacl10s" in test_name:
+        bsf = u"features-iacl10"
+    elif u"iacl50s" in test_name:
+        bsf = u"features-iacl50"
+    elif u"oacl1s" in test_name:
+        bsf = u"features-oacl1"
+    elif u"oacl10s" in test_name:
+        bsf = u"features-oacl10"
+    elif u"oacl50s" in test_name:
+        bsf = u"features-oacl50"
+    elif u"nat44det" in test_name:
+        bsf = u"nat44det-bidir"
+    elif u"nat44ed" in test_name and u"udir" in test_name:
+        bsf = u"nat44ed-udir"
+    elif u"-cps" in test_name and u"ethip4udp" in test_name:
+        bsf = u"udp-cps"
+    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
+        bsf = u"tcp-cps"
+    elif u"-pps" in test_name and u"ethip4udp" in test_name:
+        bsf = u"udp-pps"
+    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
+        bsf = u"tcp-pps"
+    elif u"-tput" in test_name and u"ethip4udp" in test_name:
+        bsf = u"udp-tput"
+    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
+        bsf = u"tcp-tput"
+    elif u"udpsrcscale" in test_name:
+        bsf = u"features-udp"
+    elif u"iacl" in test_name:
+        bsf = u"features"
+    elif u"policer" in test_name:
+        bsf = u"features"
+    elif u"adl" in test_name:
+        bsf = u"features"
+    elif u"cop" in test_name:
+        bsf = u"features"
+    elif u"nat" in test_name:
+        bsf = u"features"
+    elif u"macip" in test_name:
         bsf = u"features"
     elif u"scale" in test_name:
         bsf = u"scale"
@@ -1020,6 +1252,24 @@ def _generate_url(testbed, test_name):
 
     if u"114b" in test_name and u"vhost" in test_name:
         domain = u"vts"
+    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
+        domain = u"nat44"
+        if u"nat44det" in test_name:
+            domain += u"-det-bidir"
+        else:
+            domain += u"-ed"
+        if u"udir" in test_name:
+            domain += u"-unidir"
+        elif u"-ethip4udp-" in test_name:
+            domain += u"-udp"
+        elif u"-ethip4tcp-" in test_name:
+            domain += u"-tcp"
+        if u"-cps" in test_name:
+            domain += u"-cps"
+        elif u"-pps" in test_name:
+            domain += u"-pps"
+        elif u"-tput" in test_name:
+            domain += u"-tput"
     elif u"testpmd" in test_name or u"l3fwd" in test_name:
         domain = u"dpdk"
     elif u"memif" in test_name:
@@ -1046,8 +1296,12 @@ def _generate_url(testbed, test_name):
             bsf += u"-sw"
         elif u"hw" in test_name:
             bsf += u"-hw"
+        elif u"spe" in test_name:
+            bsf += u"-spe"
     elif u"ethip4vxlan" in test_name:
         domain = u"ip4_tunnels"
+    elif u"ethip4udpgeneve" in test_name:
+        domain = u"ip4_tunnels"
     elif u"ip4base" in test_name or u"ip4scale" in test_name:
         domain = u"ip4"
     elif u"ip6base" in test_name or u"ip6scale" in test_name:
@@ -1083,15 +1337,33 @@ def table_perf_trending_dash_html(table, input_data):
     if not table.get(u"testbed", None):
         logging.error(
             f"The testbed is not defined for the table "
-            f"{table.get(u'title', u'')}."
+            f"{table.get(u'title', u'')}. Skipping."
         )
         return
 
+    test_type = table.get(u"test-type", u"MRR")
+    if test_type not in (u"MRR", u"NDR", u"PDR"):
+        logging.error(
+            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
+            f"Skipping."
+        )
+        return
+
+    if test_type in (u"NDR", u"PDR"):
+        lnk_dir = u"../ndrpdr_trending/"
+        lnk_sufix = f"-{test_type.lower()}"
+    else:
+        lnk_dir = u"../trending/"
+        lnk_sufix = u""
+
     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
 
     try:
         with open(table[u"input-file"], u'rt') as csv_file:
             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
+    except FileNotFoundError as err:
+        logging.warning(f"{err}")
+        return
     except KeyError:
         logging.warning(u"The input file is not defined.")
         return
@@ -1146,13 +1418,14 @@ def table_perf_trending_dash_html(table, input_data):
                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
             )
             # Name:
-            if c_idx == 0:
+            if c_idx == 0 and table.get(u"add-links", True):
                 ref = ET.SubElement(
                     tdata,
                     u"a",
                     attrib=dict(
-                        href=f"../trending/"
-                             f"{_generate_url(table.get(u'testbed', ''), item)}"
+                        href=f"{lnk_dir}"
+                        f"{_generate_url(table.get(u'testbed', ''), item)}"
+                        f"{lnk_sufix}"
                     )
                 )
                 ref.text = item
@@ -1202,6 +1475,8 @@ def table_last_failed_tests(table, input_data):
             build = str(build)
             try:
                 version = input_data.metadata(job, build).get(u"version", u"")
+                duration = \
+                    input_data.metadata(job, build).get(u"elapsedtime", u"")
             except KeyError:
                 logging.error(f"Data for {job}: {build} is not present.")
                 return
@@ -1219,16 +1494,21 @@ def table_last_failed_tests(table, input_data):
                 if not groups:
                     continue
                 nic = groups.group(0)
-                failed_tests.append(f"{nic}-{tst_data[u'name']}")
-            tbl_list.append(str(passed))
-            tbl_list.append(str(failed))
+                msg = tst_data[u'msg'].replace(u"\n", u"")
+                msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
+                             'xxx.xxx.xxx.xxx', msg)
+                msg = msg.split(u'Also teardown failed')[0]
+                failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
+            tbl_list.append(passed)
+            tbl_list.append(failed)
+            tbl_list.append(duration)
             tbl_list.extend(failed_tests)
 
     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
     logging.info(f"    Writing file: {file_name}")
     with open(file_name, u"wt") as file_handler:
         for test in tbl_list:
-            file_handler.write(test + u'\n')
+            file_handler.write(f"{test}\n")
 
 
 def table_failed_tests(table, input_data):
@@ -1250,6 +1530,10 @@ def table_failed_tests(table, input_data):
     )
     data = input_data.filter_data(table, continue_on_error=True)
 
+    test_type = u"MRR"
+    if u"NDRPDR" in table.get(u"filter", list()):
+        test_type = u"NDRPDR"
+
     # Prepare the header of the tables
     header = [
         u"Test Case",
@@ -1313,15 +1597,14 @@ def table_failed_tests(table, input_data):
                 fails_last_csit = val[3]
         if fails_nr:
             max_fails = fails_nr if fails_nr > max_fails else max_fails
-            tbl_lst.append(
-                [
-                    tst_data[u"name"],
-                    fails_nr,
-                    fails_last_date,
-                    fails_last_vpp,
-                    f"mrr-daily-build-{fails_last_csit}"
-                ]
-            )
+            tbl_lst.append([
+                tst_data[u"name"],
+                fails_nr,
+                fails_last_date,
+                fails_last_vpp,
+                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
+                f"-build-{fails_last_csit}"
+            ])
 
     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
     tbl_sorted = list()
@@ -1355,10 +1638,25 @@ def table_failed_tests_html(table, input_data):
     if not table.get(u"testbed", None):
         logging.error(
             f"The testbed is not defined for the table "
-            f"{table.get(u'title', u'')}."
+            f"{table.get(u'title', u'')}. Skipping."
         )
         return
 
+    test_type = table.get(u"test-type", u"MRR")
+    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
+        logging.error(
+            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
+            f"Skipping."
+        )
+        return
+
+    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
+        lnk_dir = u"../ndrpdr_trending/"
+        lnk_sufix = u"-pdr"
+    else:
+        lnk_dir = u"../trending/"
+        lnk_sufix = u""
+
     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
 
     try:
@@ -1400,13 +1698,14 @@ def table_failed_tests_html(table, input_data):
                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
             )
             # Name:
-            if c_idx == 0:
+            if c_idx == 0 and table.get(u"add-links", True):
                 ref = ET.SubElement(
                     tdata,
                     u"a",
                     attrib=dict(
-                        href=f"../trending/"
-                             f"{_generate_url(table.get(u'testbed', ''), item)}"
+                        href=f"{lnk_dir}"
+                        f"{_generate_url(table.get(u'testbed', ''), item)}"
+                        f"{lnk_sufix}"
                     )
                 )
                 ref.text = item
@@ -1432,121 +1731,140 @@ def table_comparison(table, input_data):
     :type table: pandas.Series
     :type input_data: InputData
     """
-    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+    logging.info(f"  Generating the table {table.get('title', '')} ...")
 
     # Transform the data
     logging.info(
-        f"    Creating the data set for the {table.get(u'type', u'')} "
-        f"{table.get(u'title', u'')}."
+        f"    Creating the data set for the {table.get('type', '')} "
+        f"{table.get('title', '')}."
     )
 
-    columns = table.get(u"columns", None)
+    columns = table.get("columns", None)
     if not columns:
         logging.error(
-            f"No columns specified for {table.get(u'title', u'')}. Skipping."
+            f"No columns specified for {table.get('title', '')}. Skipping."
         )
         return
 
     cols = list()
     for idx, col in enumerate(columns):
-        if col.get(u"data-set", None) is None:
-            logging.warning(f"No data for column {col.get(u'title', u'')}")
+        if col.get("data-set", None) is None:
+            logging.warning(f"No data for column {col.get('title', '')}")
             continue
-        tag = col.get(u"tag", None)
+        tag = col.get("tag", None)
         data = input_data.filter_data(
             table,
-            params=[u"throughput", u"result", u"name", u"parent", u"tags"],
-            data=col[u"data-set"],
+            params=[
+                "throughput",
+                "result",
+                "latency",
+                "name",
+                "parent",
+                "tags"
+            ],
+            data=col["data-set"],
             continue_on_error=True
         )
         col_data = {
-            u"title": col.get(u"title", f"Column{idx}"),
-            u"data": dict()
+            "title": col.get("title", f"Column{idx}"),
+            "data": dict()
         }
         for builds in data.values:
             for build in builds:
                 for tst_name, tst_data in build.items():
-                    if tag and tag not in tst_data[u"tags"]:
+                    if tag and tag not in tst_data["tags"]:
                         continue
                     tst_name_mod = \
                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
-                        replace(u"2n1l-", u"")
-                    if col_data[u"data"].get(tst_name_mod, None) is None:
-                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
-                        if u"across testbeds" in table[u"title"].lower() or \
-                                u"across topologies" in table[u"title"].lower():
+                        replace("2n1l-", "")
+                    if col_data["data"].get(tst_name_mod, None) is None:
+                        name = tst_data['name'].rsplit('-', 1)[0]
+                        if "across testbeds" in table["title"].lower() or \
+                                "across topologies" in table["title"].lower():
                             name = _tpc_modify_displayed_test_name(name)
-                        col_data[u"data"][tst_name_mod] = {
-                            u"name": name,
-                            u"replace": True,
-                            u"data": list(),
-                            u"mean": None,
-                            u"stdev": None
+                        col_data["data"][tst_name_mod] = {
+                            "name": name,
+                            "replace": True,
+                            "data": list(),
+                            "mean": None,
+                            "stdev": None
                         }
                     _tpc_insert_data(
-                        target=col_data[u"data"][tst_name_mod],
+                        target=col_data["data"][tst_name_mod],
                         src=tst_data,
-                        include_tests=table[u"include-tests"]
+                        include_tests=table["include-tests"]
                     )
 
-        replacement = col.get(u"data-replacement", None)
+        replacement = col.get("data-replacement", None)
         if replacement:
             rpl_data = input_data.filter_data(
                 table,
-                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+                params=[
+                    "throughput",
+                    "result",
+                    "latency",
+                    "name",
+                    "parent",
+                    "tags"
+                ],
                 data=replacement,
                 continue_on_error=True
             )
             for builds in rpl_data.values:
                 for build in builds:
                     for tst_name, tst_data in build.items():
-                        if tag and tag not in tst_data[u"tags"]:
+                        if tag and tag not in tst_data["tags"]:
                             continue
                         tst_name_mod = \
                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
-                            replace(u"2n1l-", u"")
-                        if col_data[u"data"].get(tst_name_mod, None) is None:
-                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
-                            if u"across testbeds" in table[u"title"].lower() \
-                                    or u"across topologies" in \
-                                    table[u"title"].lower():
+                            replace("2n1l-", "")
+                        if col_data["data"].get(tst_name_mod, None) is None:
+                            name = tst_data['name'].rsplit('-', 1)[0]
+                            if "across testbeds" in table["title"].lower() \
+                                    or "across topologies" in \
+                                    table["title"].lower():
                                 name = _tpc_modify_displayed_test_name(name)
-                            col_data[u"data"][tst_name_mod] = {
-                                u"name": name,
-                                u"replace": False,
-                                u"data": list(),
-                                u"mean": None,
-                                u"stdev": None
+                            col_data["data"][tst_name_mod] = {
+                                "name": name,
+                                "replace": False,
+                                "data": list(),
+                                "mean": None,
+                                "stdev": None
                             }
-                        if col_data[u"data"][tst_name_mod][u"replace"]:
-                            col_data[u"data"][tst_name_mod][u"replace"] = False
-                            col_data[u"data"][tst_name_mod][u"data"] = list()
+                        if col_data["data"][tst_name_mod]["replace"]:
+                            col_data["data"][tst_name_mod]["replace"] = False
+                            col_data["data"][tst_name_mod]["data"] = list()
                         _tpc_insert_data(
-                            target=col_data[u"data"][tst_name_mod],
+                            target=col_data["data"][tst_name_mod],
                             src=tst_data,
-                            include_tests=table[u"include-tests"]
+                            include_tests=table["include-tests"]
                         )
 
-        if table[u"include-tests"] in (u"NDR", u"PDR"):
-            for tst_name, tst_data in col_data[u"data"].items():
-                if tst_data[u"data"]:
-                    tst_data[u"mean"] = mean(tst_data[u"data"])
-                    tst_data[u"stdev"] = stdev(tst_data[u"data"])
+        if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
+                or "latency" in table["include-tests"]:
+            for tst_name, tst_data in col_data["data"].items():
+                if tst_data["data"]:
+                    tst_data["mean"] = mean(tst_data["data"])
+                    tst_data["stdev"] = stdev(tst_data["data"])
 
         cols.append(col_data)
 
     tbl_dict = dict()
     for col in cols:
-        for tst_name, tst_data in col[u"data"].items():
+        for tst_name, tst_data in col["data"].items():
             if tbl_dict.get(tst_name, None) is None:
                 tbl_dict[tst_name] = {
-                    "name": tst_data[u"name"]
+                    "name": tst_data["name"]
                 }
-            tbl_dict[tst_name][col[u"title"]] = {
-                u"mean": tst_data[u"mean"],
-                u"stdev": tst_data[u"stdev"]
+            tbl_dict[tst_name][col["title"]] = {
+                "mean": tst_data["mean"],
+                "stdev": tst_data["stdev"]
             }
 
+    if not tbl_dict:
+        logging.warning(f"No data for table {table.get('title', '')}!")
+        return
+
     tbl_lst = list()
     for tst_data in tbl_dict.values():
         row = [tst_data[u"name"], ]
@@ -1554,131 +1872,155 @@ def table_comparison(table, input_data):
             row.append(tst_data.get(col[u"title"], None))
         tbl_lst.append(row)
 
-    comparisons = table.get(u"comparisons", None)
+    comparisons = table.get("comparisons", None)
+    rcas = list()
     if comparisons and isinstance(comparisons, list):
         for idx, comp in enumerate(comparisons):
             try:
-                col_ref = int(comp[u"reference"])
-                col_cmp = int(comp[u"compare"])
+                col_ref = int(comp["reference"])
+                col_cmp = int(comp["compare"])
             except KeyError:
-                logging.warning(u"Comparison: No references defined! Skipping.")
+                logging.warning("Comparison: No references defined! Skipping.")
                 comparisons.pop(idx)
                 continue
-            if not (0 < col_ref <= len(cols) and
-                    0 < col_cmp <= len(cols)) or \
-                    col_ref == col_cmp:
+            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
+                    col_ref == col_cmp):
                 logging.warning(f"Wrong values of reference={col_ref} "
                                 f"and/or compare={col_cmp}. Skipping.")
                 comparisons.pop(idx)
                 continue
+            rca_file_name = comp.get("rca-file", None)
+            if rca_file_name:
+                try:
+                    with open(rca_file_name, "r") as file_handler:
+                        rcas.append(
+                            {
+                                "title": f"RCA{idx + 1}",
+                                "data": load(file_handler, Loader=FullLoader)
+                            }
+                        )
+                except (YAMLError, IOError) as err:
+                    logging.warning(
+                        f"The RCA file {rca_file_name} does not exist or "
+                        f"it is corrupted!"
+                    )
+                    logging.debug(repr(err))
+                    rcas.append(None)
+            else:
+                rcas.append(None)
+    else:
+        comparisons = None
 
     tbl_cmp_lst = list()
     if comparisons:
         for row in tbl_lst:
             new_row = deepcopy(row)
-            add_to_tbl = False
             for comp in comparisons:
-                ref_itm = row[int(comp[u"reference"])]
+                ref_itm = row[int(comp["reference"])]
                 if ref_itm is None and \
-                        comp.get(u"reference-alt", None) is not None:
-                    ref_itm = row[int(comp[u"reference-alt"])]
+                        comp.get("reference-alt", None) is not None:
+                    ref_itm = row[int(comp["reference-alt"])]
                 cmp_itm = row[int(comp[u"compare"])]
                 if ref_itm is not None and cmp_itm is not None and \
-                        ref_itm[u"mean"] is not None and \
-                        cmp_itm[u"mean"] is not None and \
-                        ref_itm[u"stdev"] is not None and \
-                        cmp_itm[u"stdev"] is not None:
-                    delta, d_stdev = relative_change_stdev(
-                        ref_itm[u"mean"], cmp_itm[u"mean"],
-                        ref_itm[u"stdev"], cmp_itm[u"stdev"]
+                        ref_itm["mean"] is not None and \
+                        cmp_itm["mean"] is not None and \
+                        ref_itm["stdev"] is not None and \
+                        cmp_itm["stdev"] is not None:
+                    norm_factor_ref = table["norm_factor"].get(
+                        comp.get("norm-ref", ""),
+                        1.0
                     )
-                    new_row.append(
-                        {
-                            u"mean": delta * 1e6,
-                            u"stdev": d_stdev * 1e6
-                        }
+                    norm_factor_cmp = table["norm_factor"].get(
+                        comp.get("norm-cmp", ""),
+                        1.0
                     )
-                    add_to_tbl = True
+                    try:
+                        delta, d_stdev = relative_change_stdev(
+                            ref_itm["mean"] * norm_factor_ref,
+                            cmp_itm["mean"] * norm_factor_cmp,
+                            ref_itm["stdev"] * norm_factor_ref,
+                            cmp_itm["stdev"] * norm_factor_cmp
+                        )
+                    except ZeroDivisionError:
+                        break
+                    if delta is None or math.isnan(delta):
+                        break
+                    new_row.append({
+                        "mean": delta * 1e6,
+                        "stdev": d_stdev * 1e6
+                    })
                 else:
-                    new_row.append(None)
-            if add_to_tbl:
+                    break
+            else:
                 tbl_cmp_lst.append(new_row)
 
-    tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
-    tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
-
-    rcas = list()
-    rca_in = table.get(u"rca", None)
-    if rca_in and isinstance(rca_in, list):
-        for idx, itm in enumerate(rca_in):
-            try:
-                with open(itm.get(u"data", u""), u"r") as rca_file:
-                    rcas.append(
-                        {
-                            u"title": itm.get(u"title", f"RCA{idx}"),
-                            u"data": load(rca_file, Loader=FullLoader)
-                        }
-                    )
-            except (YAMLError, IOError) as err:
-                logging.warning(
-                    f"The RCA file {itm.get(u'data', u'')} does not exist or "
-                    f"it is corrupted!"
-                )
-                logging.debug(repr(err))
+    try:
+        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
+        tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
+    except TypeError as err:
+        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
 
     tbl_for_csv = list()
     for line in tbl_cmp_lst:
         row = [line[0], ]
         for idx, itm in enumerate(line[1:]):
-            if itm is None:
-                row.append(u"NT")
-                row.append(u"NT")
+            if itm is None or not isinstance(itm, dict) or\
+                    itm.get('mean', None) is None or \
+                    itm.get('stdev', None) is None:
+                row.append("NT")
+                row.append("NT")
             else:
-                row.append(round(float(itm[u'mean']) / 1e6, 3))
-                row.append(round(float(itm[u'stdev']) / 1e6, 3))
+                row.append(round(float(itm['mean']) / 1e6, 3))
+                row.append(round(float(itm['stdev']) / 1e6, 3))
         for rca in rcas:
-            rca_nr = rca[u"data"].get(row[0], u"-")
-            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+            if rca is None:
+                continue
+            rca_nr = rca["data"].get(row[0], "-")
+            row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
         tbl_for_csv.append(row)
 
-    header_csv = [u"Test Case", ]
+    header_csv = ["Test Case", ]
     for col in cols:
-        header_csv.append(f"Avg({col[u'title']})")
-        header_csv.append(f"Stdev({col[u'title']})")
+        header_csv.append(f"Avg({col['title']})")
+        header_csv.append(f"Stdev({col['title']})")
     for comp in comparisons:
         header_csv.append(
-            f"Avg({comp.get(u'title', u'')})"
+            f"Avg({comp.get('title', '')})"
         )
         header_csv.append(
-            f"Stdev({comp.get(u'title', u'')})"
+            f"Stdev({comp.get('title', '')})"
         )
-    header_csv.extend([rca[u"title"] for rca in rcas])
+    for rca in rcas:
+        if rca:
+            header_csv.append(rca["title"])
 
-    legend_lst = table.get(u"legend", None)
+    legend_lst = table.get("legend", None)
     if legend_lst is None:
-        legend = u""
+        legend = ""
     else:
-        legend = u"\n" + u"\n".join(legend_lst) + u"\n"
+        legend = "\n" + "\n".join(legend_lst) + "\n"
 
-    footnote = u""
-    for rca in rcas:
-        footnote += f"\n{rca[u'title']}:\n"
-        footnote += rca[u"data"].get(u"footnote", u"")
+    footnote = ""
+    if rcas and any(rcas):
+        footnote += "\nRoot Cause Analysis:\n"
+        for rca in rcas:
+            if rca:
+                footnote += f"{rca['data'].get('footnote', '')}\n"
 
-    csv_file = f"{table[u'output-file']}-csv.csv"
-    with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+    csv_file_name = f"{table['output-file']}-csv.csv"
+    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
         file_handler.write(
-            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
+            ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
         )
         for test in tbl_for_csv:
             file_handler.write(
-                u",".join([f'"{item}"' for item in test]) + u"\n"
+                ",".join([f'"{item}"' for item in test]) + "\n"
             )
         if legend_lst:
             for item in legend_lst:
                 file_handler.write(f'"{item}"\n')
         if footnote:
-            for itm in footnote.split(u"\n"):
+            for itm in footnote.split("\n"):
                 file_handler.write(f'"{itm}"\n')
 
     tbl_tmp = list()
@@ -1686,75 +2028,90 @@ def table_comparison(table, input_data):
     for line in tbl_cmp_lst:
         row = [line[0], ]
         for idx, itm in enumerate(line[1:]):
-            if itm is None:
-                new_itm = u"NT"
+            if itm is None or not isinstance(itm, dict) or \
+                    itm.get('mean', None) is None or \
+                    itm.get('stdev', None) is None:
+                new_itm = "NT"
             else:
                 if idx < len(cols):
                     new_itm = (
-                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
-                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
-                        replace(u"nan", u"NaN")
+                        f"{round(float(itm['mean']) / 1e6, 2)} "
+                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
+                        replace("nan", "NaN")
                     )
                 else:
                     new_itm = (
-                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
-                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
-                        replace(u"nan", u"NaN")
+                        f"{round(float(itm['mean']) / 1e6, 2):+} "
+                        f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
+                        replace("nan", "NaN")
                     )
-            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
-                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
+            if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
+                max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
             row.append(new_itm)
 
         tbl_tmp.append(row)
 
+    header = ["Test Case", ]
+    header.extend([col["title"] for col in cols])
+    header.extend([comp.get("title", "") for comp in comparisons])
+
     tbl_final = list()
     for line in tbl_tmp:
         row = [line[0], ]
         for idx, itm in enumerate(line[1:]):
-            if itm in (u"NT", u"NaN"):
+            if itm in ("NT", "NaN"):
                 row.append(itm)
                 continue
-            itm_lst = itm.rsplit(u"\u00B1", 1)
+            itm_lst = itm.rsplit("\u00B1", 1)
             itm_lst[-1] = \
-                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
-            row.append(u"\u00B1".join(itm_lst))
-        for rca in rcas:
-            rca_nr = rca[u"data"].get(row[0], u"-")
-            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
-
+                f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
+            itm_str = "\u00B1".join(itm_lst)
+
+            if idx >= len(cols):
+                # Diffs
+                rca = rcas[idx - len(cols)]
+                if rca:
+                    # Add rcas to diffs
+                    rca_nr = rca["data"].get(row[0], None)
+                    if rca_nr:
+                        hdr_len = len(header[idx + 1]) - 1
+                        if hdr_len < 19:
+                            hdr_len = 19
+                        rca_nr = f"[{rca_nr}]"
+                        itm_str = (
+                            f"{' ' * (4 - len(rca_nr))}{rca_nr}"
+                            f"{' ' * (hdr_len - 4 - len(itm_str))}"
+                            f"{itm_str}"
+                        )
+            row.append(itm_str)
         tbl_final.append(row)
 
-    header = [u"Test Case", ]
-    header.extend([col[u"title"] for col in cols])
-    header.extend([comp.get(u"title", u"") for comp in comparisons])
-    header.extend([rca[u"title"] for rca in rcas])
-
     # Generate csv tables:
-    csv_file = f"{table[u'output-file']}.csv"
-    with open(csv_file, u"wt", encoding='utf-8') as file_handler:
-        file_handler.write(u";".join(header) + u"\n")
+    csv_file_name = f"{table['output-file']}.csv"
+    logging.info(f"    Writing the file {csv_file_name}")
+    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
+        file_handler.write(";".join(header) + "\n")
         for test in tbl_final:
-            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
+            file_handler.write(";".join([str(item) for item in test]) + "\n")
 
     # Generate txt table:
-    txt_file_name = f"{table[u'output-file']}.txt"
-    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
+    txt_file_name = f"{table['output-file']}.txt"
+    logging.info(f"    Writing the file {txt_file_name}")
+    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")
 
-    with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
-        txt_file.write(legend)
-        txt_file.write(footnote)
-        if legend or footnote:
-            txt_file.write(u"\n:END")
+    with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
+        file_handler.write(legend)
+        file_handler.write(footnote)
 
     # Generate html table:
     _tpc_generate_html_table(
         header,
         tbl_final,
-        table[u'output-file'],
+        table['output-file'],
         legend=legend,
         footnote=footnote,
         sort_data=False,
-        title=table.get(u"title", u"")
+        title=table.get("title", "")
     )
 
 
@@ -1794,10 +2151,10 @@ def table_weekly_comparison(table, in_data):
     )
 
     header = [
-        [u"Version"],
-        [u"Date", ],
-        [u"Build", ],
-        [u"Testbed", ]
+        [u"VPP Version", ],
+        [u"Start Timestamp", ],
+        [u"CSIT Build", ],
+        [u"CSIT Testbed", ]
     ]
     tbl_dict = dict()
     idx = 0
@@ -1847,22 +2204,26 @@ def table_weekly_comparison(table, in_data):
         idx_cmp = cmp.get(u"compare", None)
         if idx_ref is None or idx_cmp is None:
             continue
-        header[0].append(f"Diff{idx + 1}")
-        header[1].append(header[0][idx_ref - idx - 1])
-        header[2].append(u"vs")
-        header[3].append(header[0][idx_cmp - idx - 1])
+        header[0].append(
+            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
+            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
+        )
+        header[1].append(u"")
+        header[2].append(u"")
+        header[3].append(u"")
         for tst_name, tst_data in tbl_dict.items():
             if not cmp_dict.get(tst_name, None):
                 cmp_dict[tst_name] = list()
             ref_data = tst_data.get(idx_ref, None)
             cmp_data = tst_data.get(idx_cmp, None)
             if ref_data is None or cmp_data is None:
-                cmp_dict[tst_name].append(float('nan'))
+                cmp_dict[tst_name].append(float(u'nan'))
             else:
                 cmp_dict[tst_name].append(
                     relative_change(ref_data, cmp_data)
                 )
 
+    tbl_lst_none = list()
     tbl_lst = list()
     for tst_name, tst_data in tbl_dict.items():
         itm_lst = [tst_data[u"name"], ]
@@ -1878,14 +2239,20 @@ def table_weekly_comparison(table, in_data):
                 for itm in cmp_dict[tst_name]
             ]
         )
-        tbl_lst.append(itm_lst)
+        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
+            tbl_lst_none.append(itm_lst)
+        else:
+            tbl_lst.append(itm_lst)
 
+    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
-    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
+    tbl_lst.extend(tbl_lst_none)
 
     # Generate csv table:
-    csv_file = f"{table[u'output-file']}.csv"
-    with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+    csv_file_name = f"{table[u'output-file']}.csv"
+    logging.info(f"    Writing the file {csv_file_name}")
+    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
         for hdr in header:
             file_handler.write(u",".join(hdr) + u"\n")
         for test in tbl_lst:
@@ -1896,17 +2263,30 @@ def table_weekly_comparison(table, in_data):
                 ]
             ) + u"\n")
 
-    txt_file = f"{table[u'output-file']}.txt"
-    convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")
+    txt_file_name = f"{table[u'output-file']}.txt"
+    logging.info(f"    Writing the file {txt_file_name}")
+    try:
+        convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
+    except Exception as err:
+        logging.error(repr(err))
+        for hdr in header:
+            logging.info(",".join(hdr))
+        for test in tbl_lst:
+            logging.info(",".join(
+                [
+                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
+                    replace(u"null", u"-") for item in test
+                ]
+            ))
 
     # Reorganize header in txt table
     txt_table = list()
-    with open(txt_file, u"rt", encoding='utf-8') as file_handler:
-        for line in file_handler:
+    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
+        for line in list(file_handler):
             txt_table.append(line)
     try:
         txt_table.insert(5, txt_table.pop(2))
-        with open(txt_file, u"wt", encoding='utf-8') as file_handler:
+        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
             file_handler.writelines(txt_table)
     except IndexError:
         pass