Report: latency comparison
[csit.git] resources/tools/presentation/generator_tables.py
index 74579b0..351250a 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 import logging
 import csv
-import prettytable
-import pandas as pd
+import math
+import re
 
-from string import replace
-from math import isnan
+from collections import OrderedDict
 from xml.etree import ElementTree as ET
+from datetime import datetime as dt
+from datetime import timedelta
+from copy import deepcopy
+from json import loads
+
+import plotly.graph_objects as go
+import plotly.offline as ploff
+import pandas as pd
+
+from numpy import nan, isnan
+from yaml import load, FullLoader, YAMLError
+
+from pal_utils import mean, stdev, classify_anomalies, \
+    convert_csv_to_pretty_txt, relative_change_stdev, relative_change
 
-from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers, find_outliers
+
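+# Matches the NIC token embedded in test and suite names,
+# e.g. u"10ge2p1x710" (illustrative value).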
+REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
 
 
 def generate_tables(spec, data):
@@ -37,18 +50,35 @@ def generate_tables(spec, data):
     :type data: InputData
     """
 
-    logging.info("Generating the tables ...")
+    generator = {
+        u"table_merged_details": table_merged_details,
+        u"table_soak_vs_ndr": table_soak_vs_ndr,
+        u"table_perf_trending_dash": table_perf_trending_dash,
+        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
+        u"table_last_failed_tests": table_last_failed_tests,
+        u"table_failed_tests": table_failed_tests,
+        u"table_failed_tests_html": table_failed_tests_html,
+        u"table_oper_data_html": table_oper_data_html,
+        u"table_comparison": table_comparison,
+        u"table_weekly_comparison": table_weekly_comparison
+    }
+
+    logging.info(u"Generating the tables ...")
     for table in spec.tables:
         try:
-            eval(table["algorithm"])(table, data)
-        except NameError:
-            logging.error("The algorithm '{0}' is not defined.".
-                          format(table["algorithm"]))
-    logging.info("Done.")
-
-
-def table_details(table, input_data):
-    """Generate the table(s) with algorithm: table_detailed_test_results
+            if table[u"algorithm"] == u"table_weekly_comparison":
+                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
+            generator[table[u"algorithm"]](table, data)
+        except (NameError, KeyError) as err:
+            # A missing key in the dispatch dict raises KeyError, not
+            # NameError, so catch both.
+            logging.error(
+                f"The algorithm {table[u'algorithm']} is probably not "
+                f"defined: {repr(err)}"
+            )
+    logging.info(u"Done.")
+
+
+def table_oper_data_html(table, input_data):
+    """Generate the table(s) with algorithm: html_table_oper_data
     specified in the specification file.
 
     :param table: Table to generate.
@@ -57,60 +87,238 @@ def table_details(table, input_data):
     :type input_data: InputData
     """
 
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
-
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
     # Transform the data
-    data = input_data.filter_data(table)
-
-    # Prepare the header of the tables
-    header = list()
-    for column in table["columns"]:
-        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
+    logging.info(
+        f"    Creating the data set for the {table.get(u'type', u'')} "
+        f"{table.get(u'title', u'')}."
+    )
+    data = input_data.filter_data(
+        table,
+        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
+        continue_on_error=True
+    )
+    if data.empty:
+        return
+    data = input_data.merge_data(data)
 
-    # Generate the data for the table according to the model in the table
-    # specification
-    job = table["data"].keys()[0]
-    build = str(table["data"][job][0])
-    try:
-        suites = input_data.suites(job, build)
-    except KeyError:
-        logging.error("    No data available. The table will not be generated.")
+    sort_tests = table.get(u"sort", None)
+    if sort_tests:
+        args = dict(
+            inplace=True,
+            ascending=(sort_tests == u"ascending")
+        )
+        data.sort_index(**args)
+
+    suites = input_data.filter_data(
+        table,
+        continue_on_error=True,
+        data_set=u"suites"
+    )
+    if suites.empty:
         return
+    suites = input_data.merge_data(suites)
 
-    for suite_longname, suite in suites.iteritems():
-        # Generate data
-        suite_name = suite["name"]
-        table_lst = list()
-        for test in data[job][build].keys():
-            if data[job][build][test]["parent"] in suite_name:
-                row_lst = list()
-                for column in table["columns"]:
-                    try:
-                        col_data = str(data[job][build][test][column["data"].
-                                       split(" ")[1]]).replace('"', '""')
-                        if column["data"].split(" ")[1] in ("vat-history",
-                                                            "show-run"):
-                            col_data = replace(col_data, " |br| ", "",
-                                               maxreplace=1)
-                            col_data = " |prein| {0} |preout| ".\
-                                format(col_data[:-5])
-                        row_lst.append('"{0}"'.format(col_data))
-                    except KeyError:
-                        row_lst.append("No data")
-                table_lst.append(row_lst)
+    def _generate_html_table(tst_data):
+        """Generate an HTML table with operational data for the given test.
 
-        # Write the data to file
-        if table_lst:
-            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
-                                            table["output-file-ext"])
-            logging.info("      Writing file: '{}'".format(file_name))
-            with open(file_name, "w") as file_handler:
-                file_handler.write(",".join(header) + "\n")
-                for item in table_lst:
-                    file_handler.write(",".join(item) + "\n")
+        :param tst_data: Test data to be used to generate the table.
+        :type tst_data: pandas.Series
+        :returns: HTML table with operational data.
+        :rtype: str
+        """
 
-    logging.info("  Done.")
+        colors = {
+            u"header": u"#7eade7",
+            u"empty": u"#ffffff",
+            u"body": (u"#e9f1fb", u"#d4e4f7")
+        }
+
+        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
+
+        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
+        thead = ET.SubElement(
+            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+        )
+        thead.text = tst_data[u"name"]
+
+        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
+        thead = ET.SubElement(
+            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+        )
+        thead.text = u"\t"
+
+        if tst_data.get(u"telemetry-show-run", None) is None or \
+                isinstance(tst_data[u"telemetry-show-run"], str):
+            trow = ET.SubElement(
+                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
+            )
+            tcol = ET.SubElement(
+                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
+            )
+            tcol.text = u"No Data"
+
+            trow = ET.SubElement(
+                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
+            )
+            thead = ET.SubElement(
+                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+            )
+            font = ET.SubElement(
+                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
+            )
+            font.text = u"."
+            return str(ET.tostring(tbl, encoding=u"unicode"))
+
+        tbl_hdr = (
+            u"Name",
+            u"Nr of Vectors",
+            u"Nr of Packets",
+            u"Suspends",
+            u"Cycles per Packet",
+            u"Average Vector Size"
+        )
+
+        for dut_data in tst_data[u"telemetry-show-run"].values():
+            trow = ET.SubElement(
+                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
+            )
+            tcol = ET.SubElement(
+                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
+            )
+            if dut_data.get(u"runtime", None) is None:
+                tcol.text = u"No Data"
+                continue
+
+            runtime = dict()
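+            # Re-key the flat telemetry samples as
+            # runtime[thread_id][graph_node][counter_name] = value.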
+            for item in dut_data[u"runtime"].get(u"data", tuple()):
+                tid = int(item[u"labels"][u"thread_id"])
+                if runtime.get(tid, None) is None:
+                    runtime[tid] = dict()
+                gnode = item[u"labels"][u"graph_node"]
+                if runtime[tid].get(gnode, None) is None:
+                    runtime[tid][gnode] = dict()
+                try:
+                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
+                except ValueError:
+                    runtime[tid][gnode][item[u"name"]] = item[u"value"]
+
+            threads = dict({idx: list() for idx in range(len(runtime))})
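+            # Compute clocks per item with fallback priority vectors ->
+            # calls -> suspends; graph nodes with no activity are skipped.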
+            for idx, run_data in runtime.items():
+                for gnode, gdata in run_data.items():
+                    if gdata[u"vectors"] > 0:
+                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
+                    elif gdata[u"calls"] > 0:
+                        clocks = gdata[u"clocks"] / gdata[u"calls"]
+                    elif gdata[u"suspends"] > 0:
+                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
+                    else:
+                        clocks = 0.0
+                    if gdata[u"calls"] > 0:
+                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
+                    else:
+                        vectors_call = 0.0
+                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
+                            int(gdata[u"suspends"]):
+                        threads[idx].append([
+                            gnode,
+                            int(gdata[u"calls"]),
+                            int(gdata[u"vectors"]),
+                            int(gdata[u"suspends"]),
+                            clocks,
+                            vectors_call
+                        ])
+
+            bold = ET.SubElement(tcol, u"b")
+            bold.text = (
+                f"Host IP: {dut_data.get(u'host', '')}, "
+                f"Socket: {dut_data.get(u'socket', '')}"
+            )
+            trow = ET.SubElement(
+                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
+            )
+            thead = ET.SubElement(
+                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+            )
+            thead.text = u"\t"
+
+            for thread_nr, thread in threads.items():
+                trow = ET.SubElement(
+                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
+                )
+                tcol = ET.SubElement(
+                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
+                )
+                bold = ET.SubElement(tcol, u"b")
+                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
+                trow = ET.SubElement(
+                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
+                )
+                for idx, col in enumerate(tbl_hdr):
+                    tcol = ET.SubElement(
+                        trow, u"td",
+                        attrib=dict(align=u"right" if idx else u"left")
+                    )
+                    font = ET.SubElement(
+                        tcol, u"font", attrib=dict(size=u"2")
+                    )
+                    bold = ET.SubElement(font, u"b")
+                    bold.text = col
+                for row_nr, row in enumerate(thread):
+                    trow = ET.SubElement(
+                        tbl, u"tr",
+                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
+                    )
+                    for idx, col in enumerate(row):
+                        tcol = ET.SubElement(
+                            trow, u"td",
+                            attrib=dict(align=u"right" if idx else u"left")
+                        )
+                        font = ET.SubElement(
+                            tcol, u"font", attrib=dict(size=u"2")
+                        )
+                        if isinstance(col, float):
+                            font.text = f"{col:.2f}"
+                        else:
+                            font.text = str(col)
+                trow = ET.SubElement(
+                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
+                )
+                thead = ET.SubElement(
+                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+                )
+                thead.text = u"\t"
+
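+        # Trailing spacer row: the white-on-white dot presumably keeps
+        # the empty row from collapsing in some renderers.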
+        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
+        thead = ET.SubElement(
+            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+        )
+        font = ET.SubElement(
+            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
+        )
+        font.text = u"."
+
+        return str(ET.tostring(tbl, encoding=u"unicode"))
+
+    for suite in suites.values:
+        html_table = str()
+        for test_data in data.values:
+            if test_data[u"parent"] not in suite[u"name"]:
+                continue
+            html_table += _generate_html_table(test_data)
+        if not html_table:
+            continue
+        try:
+            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
+            with open(f"{file_name}", u'w') as html_file:
+                logging.info(f"    Writing file: {file_name}")
+                html_file.write(u".. raw:: html\n\n\t")
+                html_file.write(html_table)
+                html_file.write(u"\n\t<p><br><br></p>\n")
+        except KeyError:
+            logging.warning(u"The output file is not defined.")
+            return
+    logging.info(u"  Done.")
 
 
 def table_merged_details(table, input_data):
@@ -123,226 +331,354 @@ def table_merged_details(table, input_data):
     :type input_data: InputData
     """
 
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
 
     # Transform the data
-    data = input_data.filter_data(table)
+    logging.info(
+        f"    Creating the data set for the {table.get(u'type', u'')} "
+        f"{table.get(u'title', u'')}."
+    )
+    data = input_data.filter_data(table, continue_on_error=True)
     data = input_data.merge_data(data)
-    data.sort_index(inplace=True)
 
-    suites = input_data.filter_data(table, data_set="suites")
+    sort_tests = table.get(u"sort", None)
+    if sort_tests:
+        args = dict(
+            inplace=True,
+            ascending=(sort_tests == u"ascending")
+        )
+        data.sort_index(**args)
+
+    suites = input_data.filter_data(
+        table, continue_on_error=True, data_set=u"suites")
     suites = input_data.merge_data(suites)
 
     # Prepare the header of the tables
     header = list()
-    for column in table["columns"]:
-        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
+    for column in table[u"columns"]:
+        header.append(
+            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
+        )
 
-    for _, suite in suites.iteritems():
+    for suite in suites.values:
         # Generate data
-        suite_name = suite["name"]
+        suite_name = suite[u"name"]
         table_lst = list()
         for test in data.keys():
-            if data[test]["parent"] in suite_name:
-                row_lst = list()
-                for column in table["columns"]:
-                    try:
-                        col_data = str(data[test][column["data"].
-                                       split(" ")[1]]).replace('"', '""')
-                        if column["data"].split(" ")[1] in ("vat-history",
-                                                            "show-run"):
-                            col_data = replace(col_data, " |br| ", "",
-                                               maxreplace=1)
-                            col_data = " |prein| {0} |preout| ".\
-                                format(col_data[:-5])
-                        row_lst.append('"{0}"'.format(col_data))
-                    except KeyError:
-                        row_lst.append("No data")
+            if data[test][u"status"] != u"PASS" or \
+                    data[test][u"parent"] not in suite_name:
+                continue
+            row_lst = list()
+            for column in table[u"columns"]:
+                try:
+                    col_data = str(data[test][column[
+                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
+                    # Do not include tests with "Test Failed" in the message;
+                    # skipping the column leaves the row shorter than the
+                    # header, so the length check below drops the whole row.
+                    if u"Test Failed" in col_data:
+                        continue
+                    col_data = col_data.replace(
+                        u"No Data", u"Not Captured     "
+                    )
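+                    # Split long test names roughly in half at a dash and
+                    # join the halves with an rst line break (|br|).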
+                    if column[u"data"].split(u" ")[1] in (u"name", ):
+                        if len(col_data) > 30:
+                            col_data_lst = col_data.split(u"-")
+                            half = int(len(col_data_lst) / 2)
+                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
+                                       f"- |br| " \
+                                       f"{u'-'.join(col_data_lst[half:])}"
+                        col_data = f" |prein| {col_data} |preout| "
+                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
+                        # Temporary solution: remove NDR results from message:
+                        if bool(table.get(u'remove-ndr', False)):
+                            try:
+                                col_data = col_data.split(u"\n", 1)[1]
+                            except IndexError:
+                                pass
+                        col_data = col_data.replace(u'\n', u' |br| ').\
+                            replace(u'\r', u'').replace(u'"', u"'")
+                        col_data = f" |prein| {col_data} |preout| "
+                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
+                        col_data = col_data.replace(u'\n', u' |br| ')
+                        col_data = f" |prein| {col_data[:-5]} |preout| "
+                    row_lst.append(f'"{col_data}"')
+                except KeyError:
+                    row_lst.append(u'"Not captured"')
+            if len(row_lst) == len(table[u"columns"]):
                 table_lst.append(row_lst)
 
         # Write the data to file
         if table_lst:
-            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
-                                            table["output-file-ext"])
-            logging.info("      Writing file: '{}'".format(file_name))
-            with open(file_name, "w") as file_handler:
-                file_handler.write(",".join(header) + "\n")
+            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
+            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
+            logging.info(f"      Writing file: {file_name}")
+            with open(file_name, u"wt") as file_handler:
+                file_handler.write(u",".join(header) + u"\n")
                 for item in table_lst:
-                    file_handler.write(",".join(item) + "\n")
+                    file_handler.write(u",".join(item) + u"\n")
 
-    logging.info("  Done.")
+    logging.info(u"  Done.")
 
 
-def table_performance_improvements(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_improvements
-    specified in the specification file.
+def _tpc_modify_test_name(test_name, ignore_nic=False):
+    """Modify a test name by replacing its parts.
 
-    :param table: Table to generate.
-    :param input_data: Data to process.
-    :type table: pandas.Series
-    :type input_data: InputData
+    :param test_name: Test name to be modified.
+    :param ignore_nic: If True, NIC is removed from TC name.
+    :type test_name: str
+    :type ignore_nic: bool
+    :returns: Modified test name.
+    :rtype: str
     """
-
-    def _write_line_to_file(file_handler, data):
-        """Write a line to the .csv file.
-
-        :param file_handler: File handler for the csv file. It must be open for
-         writing text.
-        :param data: Item to be written to the file.
-        :type file_handler: BinaryIO
-        :type data: list
-        """
-
-        line_lst = list()
-        for item in data:
-            if isinstance(item["data"], str):
-                # Remove -?drdisc from the end
-                if item["data"].endswith("drdisc"):
-                    item["data"] = item["data"][:-8]
-                line_lst.append(item["data"])
-            elif isinstance(item["data"], float):
-                line_lst.append("{:.1f}".format(item["data"]))
-            elif item["data"] is None:
-                line_lst.append("")
-        file_handler.write(",".join(line_lst) + "\n")
-
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
-
-    # Read the template
-    file_name = table.get("template", None)
-    if file_name:
-        try:
-            tmpl = _read_csv_template(file_name)
-        except PresentationError:
-            logging.error("  The template '{0}' does not exist. Skipping the "
-                          "table.".format(file_name))
-            return None
-    else:
-        logging.error("The template is not defined. Skipping the table.")
-        return None
-
-    # Transform the data
-    data = input_data.filter_data(table)
-
-    # Prepare the header of the tables
-    header = list()
-    for column in table["columns"]:
-        header.append(column["title"])
-
-    # Generate the data for the table according to the model in the table
-    # specification
-    tbl_lst = list()
-    for tmpl_item in tmpl:
-        tbl_item = list()
-        for column in table["columns"]:
-            cmd = column["data"].split(" ")[0]
-            args = column["data"].split(" ")[1:]
-            if cmd == "template":
-                try:
-                    val = float(tmpl_item[int(args[0])])
-                except ValueError:
-                    val = tmpl_item[int(args[0])]
-                tbl_item.append({"data": val})
-            elif cmd == "data":
-                jobs = args[0:-1]
-                operation = args[-1]
-                data_lst = list()
-                for job in jobs:
-                    for build in data[job]:
-                        try:
-                            data_lst.append(float(build[tmpl_item[0]]
-                                                  ["throughput"]["value"]))
-                        except (KeyError, TypeError):
-                            # No data, ignore
-                            continue
-                if data_lst:
-                    tbl_item.append({"data": (eval(operation)(data_lst)) /
-                                             1000000})
-                else:
-                    tbl_item.append({"data": None})
-            elif cmd == "operation":
-                operation = args[0]
-                try:
-                    nr1 = float(tbl_item[int(args[1])]["data"])
-                    nr2 = float(tbl_item[int(args[2])]["data"])
-                    if nr1 and nr2:
-                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
-                    else:
-                        tbl_item.append({"data": None})
-                except (IndexError, ValueError, TypeError):
-                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
-                    tbl_item.append({"data": None})
-                    continue
-            else:
-                logging.error("Not supported command {0}. Skipping the table.".
-                              format(cmd))
-                return None
-        tbl_lst.append(tbl_item)
-
-    # Sort the table according to the relative change
-    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
-
-    # Create the tables and write them to the files
-    file_names = [
-        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
-    ]
-
-    for file_name in file_names:
-        logging.info("    Writing the file '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(",".join(header) + "\n")
-            for item in tbl_lst:
-                if isinstance(item[-1]["data"], float):
-                    rel_change = round(item[-1]["data"], 1)
-                else:
-                    rel_change = item[-1]["data"]
-                if "ndr_top" in file_name \
-                        and "ndr" in item[0]["data"] \
-                        and rel_change >= 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "pdr_top" in file_name \
-                        and "pdr" in item[0]["data"] \
-                        and rel_change >= 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "ndr_low" in file_name \
-                        and "ndr" in item[0]["data"] \
-                        and rel_change < 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "pdr_low" in file_name \
-                        and "pdr" in item[0]["data"] \
-                        and rel_change < 10.0:
-                    _write_line_to_file(file_handler, item)
-
-    logging.info("  Done.")
-
-
-def _read_csv_template(file_name):
-    """Read the template from a .csv file.
-
-    :param file_name: Name / full path / relative path of the file to read.
-    :type file_name: str
-    :returns: Data from the template as list (lines) of lists (items on line).
-    :rtype: list
-    :raises: PresentationError if it is not possible to read the file.
+    test_name_mod = test_name.\
+        replace(u"-ndrpdr", u"").\
+        replace(u"1t1c", u"1c").\
+        replace(u"2t1c", u"1c").\
+        replace(u"2t2c", u"2c").\
+        replace(u"4t2c", u"2c").\
+        replace(u"4t4c", u"4c").\
+        replace(u"8t4c", u"4c")
+
+    if ignore_nic:
+        return re.sub(REGEX_NIC, u"", test_name_mod)
+    return test_name_mod
+
+
+def _tpc_modify_displayed_test_name(test_name):
+    """Modify a test name which is displayed in a table by replacing its parts.
+
+    :param test_name: Test name to be modified.
+    :type test_name: str
+    :returns: Modified test name.
+    :rtype: str
+    """
+    return test_name.\
+        replace(u"1t1c", u"1c").\
+        replace(u"2t1c", u"1c").\
+        replace(u"2t2c", u"2c").\
+        replace(u"4t2c", u"2c").\
+        replace(u"4t4c", u"4c").\
+        replace(u"8t4c", u"4c")
+
+
+def _tpc_insert_data(target, src, include_tests):
+    """Insert src data to the target structure.
+
+    :param target: Target structure where the data is placed.
+    :param src: Source data to be placed into the target structure.
+    :param include_tests: Which results are included (MRR, NDR, PDR, or a
+        dash-separated latency key).
+    :type target: list
+    :type src: dict
+    :type include_tests: str
     """
-
     try:
-        with open(file_name, 'r') as csv_file:
-            tmpl_data = list()
-            for line in csv_file:
-                tmpl_data.append(line[:-1].split(","))
-        return tmpl_data
-    except IOError as err:
-        raise PresentationError(str(err), level="ERROR")
+        if include_tests == u"MRR":
+            target[u"mean"] = src[u"result"][u"receive-rate"]
+            target[u"stdev"] = src[u"result"][u"receive-stdev"]
+        elif include_tests == u"PDR":
+            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
+        elif include_tests == u"NDR":
+            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
+        elif u"latency" in include_tests:
+            keys = include_tests.split(u"-")
+            if len(keys) == 4:
+                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
+                target[u"data"].append(
+                    float(u"nan") if lat == -1 else lat * 1e6
+                )
+    except (KeyError, TypeError):
+        pass
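+    # Example with hypothetical data: include_tests=u"PDR" and
+    # src={u"throughput": {u"PDR": {u"LOWER": 12.3e6}}} appends 12.3e6
+    # to target[u"data"]; missing keys are silently ignored.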
+
+
+def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
+                             footnote=u"", sort_data=True, title=u"",
+                             generate_rst=True):
+    """Generate html table from input data with simple sorting possibility.
+
+    :param header: Table header.
+    :param data: Input data to be included in the table. It is a list of lists.
+        Inner lists are rows in the table. All inner lists must be of the same
+        length. The length of these lists must be the same as the length of the
+        header.
+    :param out_file_name: The name (relative or full path) where the
+        generated html table is written.
+    :param legend: The legend to display below the table.
+    :param footnote: The footnote to display below the table (and legend).
+    :param sort_data: If True the data sorting is enabled.
+    :param title: The table (and file) title.
+    :param generate_rst: If True, wrapping rst file is generated.
+    :type header: list
+    :type data: list of lists
+    :type out_file_name: str
+    :type legend: str
+    :type footnote: str
+    :type sort_data: bool
+    :type title: str
+    :type generate_rst: bool
+    """
 
+    try:
+        idx = header.index(u"Test Case")
+    except ValueError:
+        idx = 0
+    params = {
+        u"align-hdr": (
+            [u"left", u"right"],
+            [u"left", u"left", u"right"],
+            [u"left", u"left", u"left", u"right"]
+        ),
+        u"align-itm": (
+            [u"left", u"right"],
+            [u"left", u"left", u"right"],
+            [u"left", u"left", u"left", u"right"]
+        ),
+        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
+    }
+
+    df_data = pd.DataFrame(data, columns=header)
+
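+    # Pre-compute one sorted view per column, ascending and descending;
+    # the dropdown menu below toggles which single view is visible.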
+    if sort_data:
+        df_sorted = [df_data.sort_values(
+            by=[key, header[idx]], ascending=[True, True]
+            if key != header[idx] else [False, True]) for key in header]
+        df_sorted_rev = [df_data.sort_values(
+            by=[key, header[idx]], ascending=[False, True]
+            if key != header[idx] else [True, True]) for key in header]
+        df_sorted.extend(df_sorted_rev)
+    else:
+        df_sorted = df_data
+
+    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
+                   for idx in range(len(df_data))]]
+    table_header = dict(
+        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
+        fill_color=u"#7eade7",
+        align=params[u"align-hdr"][idx],
+        font=dict(
+            family=u"Courier New",
+            size=12
+        )
+    )
+
+    fig = go.Figure()
+
+    if sort_data:
+        for table in df_sorted:
+            columns = [table.get(col) for col in header]
+            fig.add_trace(
+                go.Table(
+                    columnwidth=params[u"width"][idx],
+                    header=table_header,
+                    cells=dict(
+                        values=columns,
+                        fill_color=fill_color,
+                        align=params[u"align-itm"][idx],
+                        font=dict(
+                            family=u"Courier New",
+                            size=12
+                        )
+                    )
+                )
+            )
+
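+        # One dropdown entry per (column, direction) pair; selecting it
+        # makes exactly one of the stacked table traces visible.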
+        buttons = list()
+        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
+        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
+        for idx, hdr in enumerate(menu_items):
+            visible = [False, ] * len(menu_items)
+            visible[idx] = True
+            buttons.append(
+                dict(
+                    label=hdr.replace(u" [Mpps]", u""),
+                    method=u"update",
+                    args=[{u"visible": visible}],
+                )
+            )
+
+        fig.update_layout(
+            updatemenus=[
+                go.layout.Updatemenu(
+                    type=u"dropdown",
+                    direction=u"down",
+                    x=0.0,
+                    xanchor=u"left",
+                    y=1.002,
+                    yanchor=u"bottom",
+                    active=len(menu_items) - 1,
+                    buttons=list(buttons)
+                )
+            ],
+        )
+    else:
+        fig.add_trace(
+            go.Table(
+                columnwidth=params[u"width"][idx],
+                header=table_header,
+                cells=dict(
+                    values=[df_sorted.get(col) for col in header],
+                    fill_color=fill_color,
+                    align=params[u"align-itm"][idx],
+                    font=dict(
+                        family=u"Courier New",
+                        size=12
+                    )
+                )
+            )
+        )
+
+    ploff.plot(
+        fig,
+        show_link=False,
+        auto_open=False,
+        filename=f"{out_file_name}_in.html"
+    )
+
+    if not generate_rst:
+        return
 
-def table_performance_comparison(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_comparison
+    file_name = out_file_name.split(u"/")[-1]
+    if u"vpp" in out_file_name:
+        path = u"_tmp/src/vpp_performance_tests/comparisons/"
+    else:
+        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
+    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
+    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
+        rst_file.write(
+            u"\n"
+            u".. |br| raw:: html\n\n    <br />\n\n\n"
+            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
+            u".. |preout| raw:: html\n\n    </pre>\n\n"
+        )
+        if title:
+            rst_file.write(f"{title}\n")
+            rst_file.write(f"{u'`' * len(title)}\n\n")
+        rst_file.write(
+            u".. raw:: html\n\n"
+            f'    <iframe frameborder="0" scrolling="no" '
+            f'width="1600" height="1200" '
+            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
+            f'</iframe>\n\n'
+        )
+
+        if legend:
+            try:
+                itm_lst = legend[1:-2].split(u"\n")
+                rst_file.write(
+                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
+                )
+            except IndexError as err:
+                logging.error(f"Legend cannot be written to html file\n{err}")
+        if footnote:
+            try:
+                itm_lst = footnote[1:].split(u"\n")
+                rst_file.write(
+                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
+                )
+            except IndexError as err:
+                logging.error(f"Footnote cannot be written to html file\n{err}")
+
+
+def table_soak_vs_ndr(table, input_data):
+    """Generate the table(s) with algorithm: table_soak_vs_ndr
     specified in the specification file.
 
     :param table: Table to generate.
@@ -351,187 +687,179 @@ def table_performance_comparison(table, input_data):
     :type input_data: InputData
     """
 
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
 
     # Transform the data
+    logging.info(
+        f"    Creating the data set for the {table.get(u'type', u'')} "
+        f"{table.get(u'title', u'')}."
+    )
     data = input_data.filter_data(table, continue_on_error=True)
 
-    # Prepare the header of the tables
+    # Prepare the header of the table
     try:
-        header = ["Test case",
-                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
-                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
-                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
-                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
-                  "Change [%]"]
-        header_str = ",".join(header) + "\n"
+        header = [
+            u"Test Case",
+            f"Avg({table[u'reference'][u'title']})",
+            f"Stdev({table[u'reference'][u'title']})",
+            f"Avg({table[u'compare'][u'title']})",
+            f"Stdev{table[u'compare'][u'title']})",
+            u"Diff",
+            u"Stdev(Diff)"
+        ]
+        header_str = u";".join(header) + u"\n"
+        legend = (
+            u"\nLegend:\n"
+            f"Avg({table[u'reference'][u'title']}): "
+            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
+            f"from a series of runs of the listed tests.\n"
+            f"Stdev({table[u'reference'][u'title']}): "
+            f"Standard deviation value of {table[u'reference'][u'title']} "
+            f"[Mpps] computed from a series of runs of the listed tests.\n"
+            f"Avg({table[u'compare'][u'title']}): "
+            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
+            f"a series of runs of the listed tests.\n"
+            f"Stdev({table[u'compare'][u'title']}): "
+            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
+            f"computed from a series of runs of the listed tests.\n"
+            f"Diff({table[u'reference'][u'title']},"
+            f"{table[u'compare'][u'title']}): "
+            f"Percentage change calculated for mean values.\n"
+            u"Stdev(Diff): "
+            u"Standard deviation of percentage change calculated for mean "
+            u"values."
+        )
     except (AttributeError, KeyError) as err:
-        logging.error("The model is invalid, missing parameter: {0}".
-                      format(err))
+        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
         return
 
-    # Prepare data to the table:
+    # Create a list of available SOAK test results:
     tbl_dict = dict()
-    for job, builds in table["reference"]["data"].items():
+    for job, builds in table[u"compare"][u"data"].items():
         for build in builds:
-            for tst_name, tst_data in data[job][str(build)].iteritems():
-                if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "ref-data": list(),
-                                          "cmp-data": list()}
-                try:
-                    tbl_dict[tst_name]["ref-data"].\
-                        append(tst_data["throughput"]["value"])
-                except TypeError:
-                    pass  # No data in output.xml for this test
-
-    for job, builds in table["compare"]["data"].items():
+            for tst_name, tst_data in data[job][str(build)].items():
+                if tst_data[u"type"] == u"SOAK":
+                    tst_name_mod = tst_name.replace(u"-soak", u"")
+                    if tbl_dict.get(tst_name_mod, None) is None:
+                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
+                        nic = groups.group(0) if groups else u""
+                        name = (
+                            f"{nic}-"
+                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
+                        )
+                        tbl_dict[tst_name_mod] = {
+                            u"name": name,
+                            u"ref-data": list(),
+                            u"cmp-data": list()
+                        }
+                    try:
+                        tbl_dict[tst_name_mod][u"cmp-data"].append(
+                            tst_data[u"throughput"][u"LOWER"])
+                    except (KeyError, TypeError):
+                        pass
+    tests_lst = tbl_dict.keys()
+
+    # Add corresponding NDR test results:
+    for job, builds in table[u"reference"][u"data"].items():
         for build in builds:
-            for tst_name, tst_data in data[job][str(build)].iteritems():
+            for tst_name, tst_data in data[job][str(build)].items():
+                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
+                    replace(u"-mrr", u"")
+                if tst_name_mod not in tests_lst:
+                    continue
                 try:
-                    tbl_dict[tst_name]["cmp-data"].\
-                        append(tst_data["throughput"]["value"])
-                except KeyError:
-                    pass
-                except TypeError:
-                    tbl_dict.pop(tst_name, None)
+                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
+                        continue
+                    if table[u"include-tests"] == u"MRR":
+                        result = (tst_data[u"result"][u"receive-rate"],
+                                  tst_data[u"result"][u"receive-stdev"])
+                    elif table[u"include-tests"] == u"PDR":
+                        result = \
+                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
+                    elif table[u"include-tests"] == u"NDR":
+                        result = \
+                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
+                    else:
+                        result = None
+                    if result is not None:
+                        tbl_dict[tst_name_mod][u"ref-data"].append(
+                            result)
+                except (KeyError, TypeError):
+                    continue
 
     tbl_lst = list()
-    for tst_name in tbl_dict.keys():
-        item = [tbl_dict[tst_name]["name"], ]
-        if tbl_dict[tst_name]["ref-data"]:
-            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
-                                     table["outlier-const"])
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+    for tst_name in tbl_dict:
+        item = [tbl_dict[tst_name][u"name"], ]
+        data_r = tbl_dict[tst_name][u"ref-data"]
+        if data_r:
+            if table[u"include-tests"] == u"MRR":
+                data_r_mean = data_r[0][0]
+                data_r_stdev = data_r[0][1]
+            else:
+                data_r_mean = mean(data_r)
+                data_r_stdev = stdev(data_r)
+            item.append(round(data_r_mean / 1e6, 1))
+            item.append(round(data_r_stdev / 1e6, 1))
         else:
+            data_r_mean = None
+            data_r_stdev = None
             item.extend([None, None])
-        if tbl_dict[tst_name]["cmp-data"]:
-            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
-                                     table["outlier-const"])
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+        data_c = tbl_dict[tst_name][u"cmp-data"]
+        if data_c:
+            if table[u"include-tests"] == u"MRR":
+                data_c_mean = data_c[0][0]
+                data_c_stdev = data_c[0][1]
+            else:
+                data_c_mean = mean(data_c)
+                data_c_stdev = stdev(data_c)
+            item.append(round(data_c_mean / 1e6, 1))
+            item.append(round(data_c_stdev / 1e6, 1))
         else:
+            data_c_mean = None
+            data_c_stdev = None
             item.extend([None, None])
-        if item[1] is not None and item[3] is not None:
-            item.append(int(relative_change(float(item[1]), float(item[3]))))
-        if len(item) == 6:
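+        # relative_change_stdev (pal_utils) returns the percentage change
+        # of the means together with a stdev propagated from the inputs.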
+        if data_r_mean is not None and data_c_mean is not None:
+            delta, d_stdev = relative_change_stdev(
+                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
+            try:
+                item.append(round(delta))
+            except ValueError:
+                item.append(delta)
+            try:
+                item.append(round(d_stdev))
+            except ValueError:
+                item.append(d_stdev)
             tbl_lst.append(item)
 
     # Sort the table according to the relative change
     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
 
-    # Generate tables:
-    # All tests in csv:
-    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-                 ]
-    for file_name in tbl_names:
-        logging.info("      Writing file: '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(header_str)
-            for test in tbl_lst:
-                if (file_name.split("-")[-3] in test[0] and    # NDR vs PDR
-                        file_name.split("-")[-2] in test[0]):  # cores
-                    test[0] = "-".join(test[0].split("-")[:-1])
-                    file_handler.write(",".join([str(item) for item in test]) +
-                                       "\n")
-
-    # All tests in txt:
-    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
-                     ]
-
-    for i, txt_name in enumerate(tbl_names_txt):
-        txt_table = None
-        logging.info("      Writing file: '{0}'".format(txt_name))
-        with open(tbl_names[i], 'rb') as csv_file:
-            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
-            for row in csv_content:
-                if txt_table is None:
-                    txt_table = prettytable.PrettyTable(row)
-                else:
-                    txt_table.add_row(row)
-            txt_table.align["Test case"] = "l"
-        with open(txt_name, "w") as txt_file:
-            txt_file.write(str(txt_table))
-
-    # Selected tests in csv:
-    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    with open(input_file, "r") as in_file:
-        lines = list()
-        for line in in_file:
-            lines.append(line)
-
-    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[1:]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
-                                                  table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[-1:0:-1]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    with open(input_file, "r") as in_file:
-        lines = list()
-        for line in in_file:
-            lines.append(line)
-
-    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[1:]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
-                                                  table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[-1:0:-1]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-
-def table_performance_comparison_mrr(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_comparison_mrr
+    # Generate csv tables:
+    csv_file_name = f"{table[u'output-file']}.csv"
+    with open(csv_file_name, u"wt") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
+
+    convert_csv_to_pretty_txt(
+        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
+    )
+    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
+        file_handler.write(legend)
+
+    # Generate html table:
+    _tpc_generate_html_table(
+        header,
+        tbl_lst,
+        table[u'output-file'],
+        legend=legend,
+        title=table.get(u"title", u"")
+    )
+
+
+def table_perf_trending_dash(table, input_data):
+    """Generate the table(s) with algorithm:
+    table_perf_trending_dash
     specified in the specification file.
 
     :param table: Table to generate.
@@ -540,122 +868,463 @@ def table_performance_comparison_mrr(table, input_data):
     :type input_data: InputData
     """
 
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
 
     # Transform the data
+    logging.info(
+        f"    Creating the data set for the {table.get(u'type', u'')} "
+        f"{table.get(u'title', u'')}."
+    )
     data = input_data.filter_data(table, continue_on_error=True)
 
     # Prepare the header of the tables
-    try:
-        header = ["Test case",
-                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
-                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
-                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
-                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
-                  "Change [%]"]
-        header_str = ",".join(header) + "\n"
-    except (AttributeError, KeyError) as err:
-        logging.error("The model is invalid, missing parameter: {0}".
-                      format(err))
-        return
+    header = [
+        u"Test Case",
+        u"Trend [Mpps]",
+        u"Short-Term Change [%]",
+        u"Long-Term Change [%]",
+        u"Regressions [#]",
+        u"Progressions [#]"
+    ]
+    header_str = u",".join(header) + u"\n"
+
+    incl_tests = table.get(u"include-tests", u"MRR")
 
     # Prepare data to the table:
     tbl_dict = dict()
-    for job, builds in table["reference"]["data"].items():
+    for job, builds in table[u"data"].items():
         for build in builds:
-            for tst_name, tst_data in data[job][str(build)].iteritems():
+            for tst_name, tst_data in data[job][str(build)].items():
+                if tst_name.lower() in table.get(u"ignore-list", list()):
+                    continue
                 if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "ref-data": list(),
-                                          "cmp-data": list()}
+                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
+                    if not groups:
+                        continue
+                    nic = groups.group(0)
+                    tbl_dict[tst_name] = {
+                        u"name": f"{nic}-{tst_data[u'name']}",
+                        u"data": OrderedDict()
+                    }
                 try:
-                    tbl_dict[tst_name]["ref-data"].\
-                        append(tst_data["result"]["throughput"])
-                except TypeError:
+                    if incl_tests == u"MRR":
+                        tbl_dict[tst_name][u"data"][str(build)] = \
+                            tst_data[u"result"][u"receive-rate"]
+                    elif incl_tests == u"NDR":
+                        tbl_dict[tst_name][u"data"][str(build)] = \
+                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
+                    elif incl_tests == u"PDR":
+                        tbl_dict[tst_name][u"data"][str(build)] = \
+                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
+                except (TypeError, KeyError):
                     pass  # No data in output.xml for this test
 
-    for job, builds in table["compare"]["data"].items():
-        for build in builds:
-            for tst_name, tst_data in data[job][str(build)].iteritems():
-                try:
-                    tbl_dict[tst_name]["cmp-data"].\
-                        append(tst_data["result"]["throughput"])
-                except KeyError:
-                    pass
-                except TypeError:
-                    tbl_dict.pop(tst_name, None)
-
     tbl_lst = list()
-    for tst_name in tbl_dict.keys():
-        item = [tbl_dict[tst_name]["name"], ]
-        if tbl_dict[tst_name]["ref-data"]:
-            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
-                                     table["outlier-const"])
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+    for tst_name in tbl_dict:
+        data_t = tbl_dict[tst_name][u"data"]
+        if len(data_t) < 2:
+            continue
+
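+        # classify_anomalies (pal_utils) is expected to return per-sample
+        # classifications (u"regression" / u"progression" / normal) and
+        # running averages aligned with the data points.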
+        try:
+            classification_lst, avgs, _ = classify_anomalies(data_t)
+        except ValueError as err:
+            logging.info(f"{err} Skipping")
+            return
+
+        win_size = min(len(data_t), table[u"window"])
+        long_win_size = min(len(data_t), table[u"long-trend-window"])
+
+        try:
+            max_long_avg = max(
+                [x for x in avgs[-long_win_size:-win_size]
+                 if not isnan(x)])
+        except ValueError:
+            max_long_avg = nan
+        last_avg = avgs[-1]
+        avg_week_ago = avgs[max(-win_size, -len(avgs))]
+
+        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
+            rel_change_last = nan
         else:
-            item.extend([None, None])
-        if tbl_dict[tst_name]["cmp-data"]:
-            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
-                                     table["outlier-const"])
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+            rel_change_last = round(
+                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
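+        # E.g. (made-up numbers): last_avg = 9.0e6, avg_week_ago = 1.0e7
+        # -> rel_change_last = round((9.0e6 - 1.0e7) / 1.0e7 * 1e2, 2) = -10.0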
+
+        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
+            rel_change_long = nan
         else:
-            item.extend([None, None])
-        if item[1] is not None and item[3] is not None and item[1] != 0:
-            item.append(int(relative_change(float(item[1]), float(item[3]))))
-        if len(item) == 6:
-            tbl_lst.append(item)
+            rel_change_long = round(
+                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
+
+        if classification_lst:
+            if isnan(last_avg) or isnan(rel_change_last) or \
+                    isnan(rel_change_long):
+                continue
+            tbl_lst.append(
+                [tbl_dict[tst_name][u"name"],
+                 round(last_avg / 1e6, 2),
+                 rel_change_last,
+                 rel_change_long,
+                 classification_lst[-win_size+1:].count(u"regression"),
+                 classification_lst[-win_size+1:].count(u"progression")])
+
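+    # Three stable sorts: test name (tertiary key), long-trend change
+    # (secondary) and last-week change (primary).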
+    tbl_lst.sort(key=lambda rel: rel[0])
+    tbl_lst.sort(key=lambda rel: rel[3])
+    tbl_lst.sort(key=lambda rel: rel[2])
 
-    # Sort the table according to the relative change
-    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
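+    # Bucket by the number of regressions (primary) and progressions
+    # (secondary), both descending, preserving the ordering above within
+    # each bucket.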
+    tbl_sorted = list()
+    for nrr in range(table[u"window"], -1, -1):
+        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
+        for nrp in range(table[u"window"], -1, -1):
+            tbl_out = [item for item in tbl_reg if item[5] == nrp]
+            tbl_sorted.extend(tbl_out)
 
-    # Generate tables:
-    # All tests in csv:
-    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"]),
-                 "{0}-2t2c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"]),
-                 "{0}-4t4c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"])
-                 ]
-    for file_name in tbl_names:
-        logging.info("      Writing file: '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(header_str)
-            for test in tbl_lst:
-                if file_name.split("-")[-2] in test[0]:  # cores
-                    test[0] = "-".join(test[0].split("-")[:-1])
-                    file_handler.write(",".join([str(item) for item in test]) +
-                                       "\n")
-
-    # All tests in txt:
-    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-4t4c-full.txt".format(table["output-file"])
-                     ]
-
-    for i, txt_name in enumerate(tbl_names_txt):
-        txt_table = None
-        logging.info("      Writing file: '{0}'".format(txt_name))
-        with open(tbl_names[i], 'rb') as csv_file:
-            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
-            for row in csv_content:
-                if txt_table is None:
-                    txt_table = prettytable.PrettyTable(row)
-                else:
-                    txt_table.add_row(row)
-            txt_table.align["Test case"] = "l"
-        with open(txt_name, "w") as txt_file:
-            txt_file.write(str(txt_table))
+    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
+
+    logging.info(f"    Writing file: {file_name}")
+    with open(file_name, u"wt") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_sorted:
+            file_handler.write(u",".join([str(item) for item in test]) + u'\n')
 
+    logging.info(f"    Writing file: {table[u'output-file']}.txt")
+    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
+
+
+def _generate_url(testbed, test_name):
+    """Generate URL to a trending plot from the name of the test case.
+
+    :param testbed: The testbed used for testing.
+    :param test_name: The name of the test case.
+    :type testbed: str
+    :type test_name: str
+    :returns: The URL to the plot with the trending data for the given test
+        case.
+    :rtype: str
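+
+    Illustrative example (a sketch, not an exhaustive mapping):
+
+        _generate_url(u"2n-skx", u"x710-64b-2t1c-ethip4-ip4base-ndrpdr")
+        # -> u"ip4-2n-skx-x710.html#64b-2t1c-base-dpdk"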
+    """
 
-def table_performance_trending_dashboard(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_comparison
+    if u"x520" in test_name:
+        nic = u"x520"
+    elif u"x710" in test_name:
+        nic = u"x710"
+    elif u"xl710" in test_name:
+        nic = u"xl710"
+    elif u"xxv710" in test_name:
+        nic = u"xxv710"
+    elif u"vic1227" in test_name:
+        nic = u"vic1227"
+    elif u"vic1385" in test_name:
+        nic = u"vic1385"
+    elif u"x553" in test_name:
+        nic = u"x553"
+    elif u"cx556" in test_name or u"cx556a" in test_name:
+        nic = u"cx556a"
+    else:
+        nic = u""
+
+    if u"64b" in test_name:
+        frame_size = u"64b"
+    elif u"78b" in test_name:
+        frame_size = u"78b"
+    elif u"imix" in test_name:
+        frame_size = u"imix"
+    elif u"9000b" in test_name:
+        frame_size = u"9000b"
+    elif u"1518b" in test_name:
+        frame_size = u"1518b"
+    elif u"114b" in test_name:
+        frame_size = u"114b"
+    else:
+        frame_size = u""
+
+    if u"1t1c" in test_name or \
+        (u"-1c-" in test_name and
+         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
+        cores = u"1t1c"
+    elif u"2t2c" in test_name or \
+         (u"-2c-" in test_name and
+          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
+        cores = u"2t2c"
+    elif u"4t4c" in test_name or \
+         (u"-4c-" in test_name and
+          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
+        cores = u"4t4c"
+    elif u"2t1c" in test_name or \
+         (u"-1c-" in test_name and
+          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+        cores = u"2t1c"
+    elif u"4t2c" in test_name or \
+         (u"-2c-" in test_name and
+          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+        cores = u"4t2c"
+    elif u"8t4c" in test_name or \
+         (u"-4c-" in test_name and
+          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+        cores = u"8t4c"
+    else:
+        cores = u""
+
+    if u"testpmd" in test_name:
+        driver = u"testpmd"
+    elif u"l3fwd" in test_name:
+        driver = u"l3fwd"
+    elif u"avf" in test_name:
+        driver = u"avf"
+    elif u"rdma" in test_name:
+        driver = u"rdma"
+    elif u"dnv" in testbed or u"tsh" in testbed:
+        driver = u"ixgbe"
+    else:
+        driver = u"dpdk"
+
+    if u"macip-iacl1s" in test_name:
+        bsf = u"features-macip-iacl1"
+    elif u"macip-iacl10s" in test_name:
+        bsf = u"features-macip-iacl10"
+    elif u"macip-iacl50s" in test_name:
+        bsf = u"features-macip-iacl50"
+    elif u"iacl1s" in test_name:
+        bsf = u"features-iacl1"
+    elif u"iacl10s" in test_name:
+        bsf = u"features-iacl10"
+    elif u"iacl50s" in test_name:
+        bsf = u"features-iacl50"
+    elif u"oacl1s" in test_name:
+        bsf = u"features-oacl1"
+    elif u"oacl10s" in test_name:
+        bsf = u"features-oacl10"
+    elif u"oacl50s" in test_name:
+        bsf = u"features-oacl50"
+    elif u"nat44det" in test_name:
+        bsf = u"nat44det-bidir"
+    elif u"nat44ed" in test_name and u"udir" in test_name:
+        bsf = u"nat44ed-udir"
+    elif u"-cps" in test_name and u"ethip4udp" in test_name:
+        bsf = u"udp-cps"
+    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
+        bsf = u"tcp-cps"
+    elif u"-pps" in test_name and u"ethip4udp" in test_name:
+        bsf = u"udp-pps"
+    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
+        bsf = u"tcp-pps"
+    elif u"-tput" in test_name and u"ethip4udp" in test_name:
+        bsf = u"udp-tput"
+    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
+        bsf = u"tcp-tput"
+    elif u"udpsrcscale" in test_name:
+        bsf = u"features-udp"
+    elif u"iacl" in test_name:
+        bsf = u"features"
+    elif u"policer" in test_name:
+        bsf = u"features"
+    elif u"adl" in test_name:
+        bsf = u"features"
+    elif u"cop" in test_name:
+        bsf = u"features"
+    elif u"nat" in test_name:
+        bsf = u"features"
+    elif u"macip" in test_name:
+        bsf = u"features"
+    elif u"scale" in test_name:
+        bsf = u"scale"
+    elif u"base" in test_name:
+        bsf = u"base"
+    else:
+        bsf = u"base"
+
+    if u"114b" in test_name and u"vhost" in test_name:
+        domain = u"vts"
+    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
+        domain = u"nat44"
+        if u"nat44det" in test_name:
+            domain += u"-det-bidir"
+        else:
+            domain += u"-ed"
+        if u"udir" in test_name:
+            domain += u"-unidir"
+        elif u"-ethip4udp-" in test_name:
+            domain += u"-udp"
+        elif u"-ethip4tcp-" in test_name:
+            domain += u"-tcp"
+        if u"-cps" in test_name:
+            domain += u"-cps"
+        elif u"-pps" in test_name:
+            domain += u"-pps"
+        elif u"-tput" in test_name:
+            domain += u"-tput"
+    elif u"testpmd" in test_name or u"l3fwd" in test_name:
+        domain = u"dpdk"
+    elif u"memif" in test_name:
+        domain = u"container_memif"
+    elif u"srv6" in test_name:
+        domain = u"srv6"
+    elif u"vhost" in test_name:
+        domain = u"vhost"
+        if u"vppl2xc" in test_name:
+            driver += u"-vpp"
+        else:
+            driver += u"-testpmd"
+        if u"lbvpplacp" in test_name:
+            bsf += u"-link-bonding"
+    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
+        domain = u"nf_service_density_vnfc"
+    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
+        domain = u"nf_service_density_cnfc"
+    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
+        domain = u"nf_service_density_cnfp"
+    elif u"ipsec" in test_name:
+        domain = u"ipsec"
+        if u"sw" in test_name:
+            bsf += u"-sw"
+        elif u"hw" in test_name:
+            bsf += u"-hw"
+    elif u"ethip4vxlan" in test_name:
+        domain = u"ip4_tunnels"
+    elif u"ethip4udpgeneve" in test_name:
+        domain = u"ip4_tunnels"
+    elif u"ip4base" in test_name or u"ip4scale" in test_name:
+        domain = u"ip4"
+    elif u"ip6base" in test_name or u"ip6scale" in test_name:
+        domain = u"ip6"
+    elif u"l2xcbase" in test_name or \
+            u"l2xcscale" in test_name or \
+            u"l2bdbasemaclrn" in test_name or \
+            u"l2bdscale" in test_name or \
+            u"l2patch" in test_name:
+        domain = u"l2"
+    else:
+        domain = u""
+
+    file_name = u"-".join((domain, testbed, nic)) + u".html#"
+    anchor_name = u"-".join((frame_size, cores, bsf, driver))
+
+    return file_name + anchor_name
+
+
+def table_perf_trending_dash_html(table, input_data):
+    """Generate the table(s) with algorithm:
+    table_perf_trending_dash_html specified in the specification
+    file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: dict
+    :type input_data: InputData
+    """
+
+    _ = input_data
+
+    if not table.get(u"testbed", None):
+        logging.error(
+            f"The testbed is not defined for the table "
+            f"{table.get(u'title', u'')}. Skipping."
+        )
+        return
+
+    test_type = table.get(u"test-type", u"MRR")
+    if test_type not in (u"MRR", u"NDR", u"PDR"):
+        logging.error(
+            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
+            f"Skipping."
+        )
+        return
+
+    if test_type in (u"NDR", u"PDR"):
+        lnk_dir = u"../ndrpdr_trending/"
+        lnk_suffix = f"-{test_type.lower()}"
+    else:
+        lnk_dir = u"../trending/"
+        lnk_suffix = u""
+
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
+    try:
+        with open(table[u"input-file"], u'rt') as csv_file:
+            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
+    except FileNotFoundError as err:
+        logging.warning(f"{err}")
+        return
+    except KeyError:
+        logging.warning(u"The input file is not defined.")
+        return
+    except csv.Error as err:
+        logging.warning(
+            f"Not possible to process the file {table[u'input-file']}.\n"
+            f"{repr(err)}"
+        )
+        return
+
+    # Table:
+    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
+
+    # Table header:
+    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
+    for idx, item in enumerate(csv_lst[0]):
+        alignment = u"left" if idx == 0 else u"center"
+        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
+        thead.text = item
+
+    # Rows:
+    colors = {
+        u"regression": (
+            u"#ffcccc",
+            u"#ff9999"
+        ),
+        u"progression": (
+            u"#c6ecc6",
+            u"#9fdf9f"
+        ),
+        u"normal": (
+            u"#e9f1fb",
+            u"#d4e4f7"
+        )
+    }
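+    # Two shades per classification, alternated by row index for readability.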
+    for r_idx, row in enumerate(csv_lst[1:]):
+        if int(row[4]):
+            color = u"regression"
+        elif int(row[5]):
+            color = u"progression"
+        else:
+            color = u"normal"
+        trow = ET.SubElement(
+            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
+        )
+
+        # Columns:
+        for c_idx, item in enumerate(row):
+            tdata = ET.SubElement(
+                trow,
+                u"td",
+                attrib=dict(align=u"left" if c_idx == 0 else u"center")
+            )
+            # Name:
+            if c_idx == 0 and table.get(u"add-links", True):
+                ref = ET.SubElement(
+                    tdata,
+                    u"a",
+                    attrib=dict(
+                        href=f"{lnk_dir}"
+                        f"{_generate_url(table.get(u'testbed', ''), item)}"
+                        f"{lnk_sufix}"
+                    )
+                )
+                ref.text = item
+            else:
+                tdata.text = item
+    try:
+        with open(table[u"output-file"], u'w') as html_file:
+            logging.info(f"    Writing file: {table[u'output-file']}")
+            html_file.write(u".. raw:: html\n\n\t")
+            html_file.write(ET.tostring(dashboard, encoding=u"unicode"))
+            html_file.write(u"\n\t<p><br><br></p>\n")
+    except KeyError:
+        logging.warning(u"The output file is not defined.")
+        return
+
+
+def table_last_failed_tests(table, input_data):
+    """Generate the table(s) with algorithm: table_last_failed_tests
     specified in the specification file.
 
     :param table: Table to generate.
@@ -664,208 +1333,176 @@ def table_performance_trending_dashboard(table, input_data):
     :type input_data: InputData
     """
 
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
 
     # Transform the data
+    logging.info(
+        f"    Creating the data set for the {table.get(u'type', u'')} "
+        f"{table.get(u'title', u'')}."
+    )
+
     data = input_data.filter_data(table, continue_on_error=True)
 
+    if data is None or data.empty:
+        logging.warning(
+            f"    No data for the {table.get(u'type', u'')} "
+            f"{table.get(u'title', u'')}."
+        )
+        return
+
+    tbl_list = list()
+    for job, builds in table[u"data"].items():
+        for build in builds:
+            build = str(build)
+            try:
+                version = input_data.metadata(job, build).get(u"version", u"")
+                duration = \
+                    input_data.metadata(job, build).get(u"elapsedtime", u"")
+            except KeyError:
+                logging.error(f"Data for {job}: {build} is not present.")
+                return
+            tbl_list.append(build)
+            tbl_list.append(version)
+            failed_tests = list()
+            passed = 0
+            failed = 0
+            for tst_data in data[job][build].values:
+                if tst_data[u"status"] != u"FAIL":
+                    passed += 1
+                    continue
+                failed += 1
+                groups = re.search(REGEX_NIC, tst_data[u"parent"])
+                if not groups:
+                    continue
+                nic = groups.group(0)
+                failed_tests.append(f"{nic}-{tst_data[u'name']}")
+            tbl_list.append(passed)
+            tbl_list.append(failed)
+            tbl_list.append(duration)
+            tbl_list.extend(failed_tests)
+
+    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
+    logging.info(f"    Writing file: {file_name}")
+    with open(file_name, u"wt") as file_handler:
+        for test in tbl_list:
+            file_handler.write(f"{test}\n")
+
+
+def table_failed_tests(table, input_data):
+    """Generate the table(s) with algorithm: table_failed_tests
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
+    # Transform the data
+    logging.info(
+        f"    Creating the data set for the {table.get(u'type', u'')} "
+        f"{table.get(u'title', u'')}."
+    )
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    test_type = u"MRR"
+    if u"NDRPDR" in table.get(u"filter", list()):
+        test_type = u"NDRPDR"
+
     # Prepare the header of the tables
-    header = ["Test Case",
-              "Throughput Trend [Mpps]",
-              "Trend Compliance",
-              "Top Anomaly [Mpps]",
-              "Change [%]",
-              "Outliers [Number]"
-              ]
-    header_str = ",".join(header) + "\n"
+    header = [
+        u"Test Case",
+        u"Failures [#]",
+        u"Last Failure [Time]",
+        u"Last Failure [VPP-Build-Id]",
+        u"Last Failure [CSIT-Job-Build-Id]"
+    ]
+
+    # Generate the data for the table according to the model in the table
+    # specification
+
+    now = dt.utcnow()
+    timeperiod = timedelta(int(table.get(u"window", 7)))
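+    # Only failures from builds generated within the last u"window" days
+    # (7 by default) are counted.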
 
-    # Prepare data to the table:
     tbl_dict = dict()
-    for job, builds in table["data"].items():
+    for job, builds in table[u"data"].items():
         for build in builds:
-            for tst_name, tst_data in data[job][str(build)].iteritems():
+            build = str(build)
+            for tst_name, tst_data in data[job][build].items():
+                if tst_name.lower() in table.get(u"ignore-list", list()):
+                    continue
                 if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "data": dict()}
+                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
+                    if not groups:
+                        continue
+                    nic = groups.group(0)
+                    tbl_dict[tst_name] = {
+                        u"name": f"{nic}-{tst_data[u'name']}",
+                        u"data": OrderedDict()
+                    }
                 try:
-                    tbl_dict[tst_name]["data"][str(build)] =  \
-                        tst_data["result"]["throughput"]
-                except (TypeError, KeyError):
-                    pass  # No data in output.xml for this test
-
+                    generated = input_data.metadata(job, build).\
+                        get(u"generated", u"")
+                    if not generated:
+                        continue
+                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
+                    if (now - then) <= timeperiod:
+                        tbl_dict[tst_name][u"data"][build] = (
+                            tst_data[u"status"],
+                            generated,
+                            input_data.metadata(job, build).get(u"version",
+                                                                u""),
+                            build
+                        )
+                except (TypeError, KeyError) as err:
+                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
+
+    max_fails = 0
     tbl_lst = list()
-    for tst_name in tbl_dict.keys():
-        if len(tbl_dict[tst_name]["data"]) > 2:
-
-            pd_data = pd.Series(tbl_dict[tst_name]["data"])
-            win_size = pd_data.size \
-                if pd_data.size < table["window"] else table["window"]
-            # Test name:
-            name = tbl_dict[tst_name]["name"]
-
-            median = pd_data.rolling(window=win_size, min_periods=2).median()
-            trimmed_data, _ = find_outliers(pd_data, outlier_const=1.5)
-            stdev_t = pd_data.rolling(window=win_size, min_periods=2).std()
-
-            rel_change_lst = [None, ]
-            classification_lst = [None, ]
-            median_lst = [None, ]
-            sample_lst = [None, ]
-            first = True
-            for build_nr, value in pd_data.iteritems():
-                if first:
-                    first = False
-                    continue
-                # Relative changes list:
-                if not isnan(value) \
-                        and not isnan(median[build_nr]) \
-                        and median[build_nr] != 0:
-                    rel_change_lst.append(round(
-                        relative_change(float(median[build_nr]), float(value)),
-                        2))
-                else:
-                    rel_change_lst.append(None)
-
-                # Classification list:
-                if isnan(trimmed_data[build_nr]) \
-                        or isnan(median[build_nr]) \
-                        or isnan(stdev_t[build_nr]) \
-                        or isnan(value):
-                    classification_lst.append("outlier")
-                elif value < (median[build_nr] - 3 * stdev_t[build_nr]):
-                    classification_lst.append("regression")
-                elif value > (median[build_nr] + 3 * stdev_t[build_nr]):
-                    classification_lst.append("progression")
-                else:
-                    classification_lst.append("normal")
-                sample_lst.append(value)
-                median_lst.append(median[build_nr])
-
-            last_idx = len(classification_lst) - 1
-            first_idx = last_idx - int(table["evaluated-window"])
-            if first_idx < 0:
-                first_idx = 0
-
-            nr_outliers = 0
-            consecutive_outliers = 0
-            failure = False
-            for item in classification_lst[first_idx:]:
-                if item == "outlier":
-                    nr_outliers += 1
-                    consecutive_outliers += 1
-                    if consecutive_outliers == 3:
-                        failure = True
-                else:
-                    consecutive_outliers = 0
-
-            if failure:
-                classification = "failure"
-            elif "regression" in classification_lst[first_idx:]:
-                classification = "regression"
-            elif "progression" in classification_lst[first_idx:]:
-                classification = "progression"
-            else:
-                classification = "normal"
-
-            if classification == "normal":
-                index = len(classification_lst) - 1
-            else:
-                tmp_classification = "outlier" if classification == "failure" \
-                    else classification
-                for idx in range(first_idx, len(classification_lst)):
-                    if classification_lst[idx] == tmp_classification:
-                        index = idx
-                        break
-                for idx in range(index+1, len(classification_lst)):
-                    if classification_lst[idx] == tmp_classification:
-                        if rel_change_lst[idx] > rel_change_lst[index]:
-                            index = idx
-
-            # if "regression" in classification_lst[first_idx:]:
-            #     classification = "regression"
-            # elif "outlier" in classification_lst[first_idx:]:
-            #     classification = "outlier"
-            # elif "progression" in classification_lst[first_idx:]:
-            #     classification = "progression"
-            # elif "normal" in classification_lst[first_idx:]:
-            #     classification = "normal"
-            # else:
-            #     classification = None
-            #
-            # nr_outliers = 0
-            # consecutive_outliers = 0
-            # failure = False
-            # for item in classification_lst[first_idx:]:
-            #     if item == "outlier":
-            #         nr_outliers += 1
-            #         consecutive_outliers += 1
-            #         if consecutive_outliers == 3:
-            #             failure = True
-            #     else:
-            #         consecutive_outliers = 0
-            #
-            # idx = len(classification_lst) - 1
-            # while idx:
-            #     if classification_lst[idx] == classification:
-            #         break
-            #     idx -= 1
-            #
-            # if failure:
-            #     classification = "failure"
-            # elif classification == "outlier":
-            #     classification = "normal"
-
-            trend = round(float(median_lst[-1]) / 1000000, 2) \
-                if not isnan(median_lst[-1]) else ''
-            sample = round(float(sample_lst[index]) / 1000000, 2) \
-                if not isnan(sample_lst[index]) else ''
-            rel_change = rel_change_lst[index] \
-                if rel_change_lst[index] is not None else ''
-            tbl_lst.append([name,
-                            trend,
-                            classification,
-                            '-' if classification == "normal" else sample,
-                            '-' if classification == "normal" else rel_change,
-                            nr_outliers])
-
-    # Sort the table according to the classification
+    for tst_data in tbl_dict.values():
+        fails_nr = 0
+        fails_last_date = u""
+        fails_last_vpp = u""
+        fails_last_csit = u""
+        for val in tst_data[u"data"].values():
+            if val[0] == u"FAIL":
+                fails_nr += 1
+                fails_last_date = val[1]
+                fails_last_vpp = val[2]
+                fails_last_csit = val[3]
+        if fails_nr:
+            max_fails = max(max_fails, fails_nr)
+            tbl_lst.append([
+                tst_data[u"name"],
+                fails_nr,
+                fails_last_date,
+                fails_last_vpp,
+                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
+                f"-build-{fails_last_csit}"
+            ])
+
+    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
     tbl_sorted = list()
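+    # Bucket by failure count, descending; the date ordering from the sort
+    # above is preserved within each bucket.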
-    for classification in ("failure", "regression", "progression", "normal"):
-        tbl_tmp = [item for item in tbl_lst if item[2] == classification]
-        tbl_tmp.sort(key=lambda rel: rel[0])
-        tbl_sorted.extend(tbl_tmp)
-
-    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
-
-    logging.info("      Writing file: '{0}'".format(file_name))
-    with open(file_name, "w") as file_handler:
-        file_handler.write(header_str)
+    for nrf in range(max_fails, -1, -1):
+        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
+        tbl_sorted.extend(tbl_fails)
+
+    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
+    logging.info(f"    Writing file: {file_name}")
+    with open(file_name, u"wt") as file_handler:
+        file_handler.write(u",".join(header) + u"\n")
         for test in tbl_sorted:
-            file_handler.write(",".join([str(item) for item in test]) + '\n')
-
-    txt_file_name = "{0}.txt".format(table["output-file"])
-    txt_table = None
-    logging.info("      Writing file: '{0}'".format(txt_file_name))
-    with open(file_name, 'rb') as csv_file:
-        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
-        for row in csv_content:
-            if txt_table is None:
-                txt_table = prettytable.PrettyTable(row)
-            else:
-                txt_table.add_row(row)
-        txt_table.align["Test case"] = "l"
-    with open(txt_file_name, "w") as txt_file:
-        txt_file.write(str(txt_table))
+            file_handler.write(u",".join([str(item) for item in test]) + u'\n')
 
+    logging.info(f"    Writing file: {table[u'output-file']}.txt")
+    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
 
-def table_performance_trending_dashboard_html(table, input_data):
-    """Generate the table(s) with algorithm:
-    table_performance_trending_dashboard_html specified in the specification
-    file.
+
+def table_failed_tests_html(table, input_data):
+    """Generate the table(s) with algorithm: table_failed_tests_html
+    specified in the specification file.
 
     :param table: Table to generate.
     :param input_data: Data to process.
@@ -873,123 +1510,651 @@ def table_performance_trending_dashboard_html(table, input_data):
     :type input_data: InputData
     """
 
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
+    _ = input_data
+
+    if not table.get(u"testbed", None):
+        logging.error(
+            f"The testbed is not defined for the table "
+            f"{table.get(u'title', u'')}. Skipping."
+        )
+        return
+
+    test_type = table.get(u"test-type", u"MRR")
+    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
+        logging.error(
+            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
+            f"Skipping."
+        )
+        return
+
+    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
+        lnk_dir = u"../ndrpdr_trending/"
+        lnk_suffix = u"-pdr"
+    else:
+        lnk_dir = u"../trending/"
+        lnk_suffix = u""
+
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
 
     try:
-        with open(table["input-file"], 'rb') as csv_file:
-            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
-            csv_lst = [item for item in csv_content]
+        with open(table[u"input-file"], u'rt') as csv_file:
+            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
     except KeyError:
-        logging.warning("The input file is not defined.")
+        logging.warning(u"The input file is not defined.")
         return
     except csv.Error as err:
-        logging.warning("Not possible to process the file '{0}'.\n{1}".
-                        format(table["input-file"], err))
+        logging.warning(
+            f"Not possible to process the file {table[u'input-file']}.\n"
+            f"{repr(err)}"
+        )
         return
 
     # Table:
-    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
+    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
 
     # Table header:
-    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
+    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
     for idx, item in enumerate(csv_lst[0]):
-        alignment = "left" if idx == 0 else "center"
-        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
-        th.text = item
+        alignment = u"left" if idx == 0 else u"center"
+        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
+        thead.text = item
 
     # Rows:
+    colors = (u"#e9f1fb", u"#d4e4f7")
     for r_idx, row in enumerate(csv_lst[1:]):
-        background = "#D4E4F7" if r_idx % 2 else "white"
-        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
+        background = colors[r_idx % 2]
+        trow = ET.SubElement(
+            failed_tests, u"tr", attrib=dict(bgcolor=background)
+        )
 
         # Columns:
         for c_idx, item in enumerate(row):
-            alignment = "left" if c_idx == 0 else "center"
-            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
+            tdata = ET.SubElement(
+                trow,
+                u"td",
+                attrib=dict(align=u"left" if c_idx == 0 else u"center")
+            )
             # Name:
-            url = "../trending/"
-            file_name = ""
-            anchor = "#"
-            feature = ""
-            if c_idx == 0:
-                if "memif" in item:
-                    file_name = "container_memif.html"
-
-                elif "vhost" in item:
-                    if "l2xcbase" in item or "l2bdbasemaclrn" in item:
-                        file_name = "vm_vhost_l2.html"
-                    elif "ip4base" in item:
-                        file_name = "vm_vhost_ip4.html"
-
-                elif "ipsec" in item:
-                    file_name = "ipsec.html"
-
-                elif "ethip4lispip" in item or "ethip4vxlan" in item:
-                    file_name = "ip4_tunnels.html"
-
-                elif "ip4base" in item or "ip4scale" in item:
-                    file_name = "ip4.html"
-                    if "iacl" in item or "snat" in item or "cop" in item:
-                        feature = "-features"
-
-                elif "ip6base" in item or "ip6scale" in item:
-                    file_name = "ip6.html"
-
-                elif "l2xcbase" in item or "l2xcscale" in item \
-                        or "l2bdbasemaclrn" in item or "l2bdscale" in item \
-                        or "l2dbbasemaclrn" in item or "l2dbscale" in item:
-                    file_name = "l2.html"
-                    if "iacl" in item:
-                        feature = "-features"
-
-                if "x520" in item:
-                    anchor += "x520-"
-                elif "x710" in item:
-                    anchor += "x710-"
-                elif "xl710" in item:
-                    anchor += "xl710-"
-
-                if "64b" in item:
-                    anchor += "64b-"
-                elif "78b" in item:
-                    anchor += "78b"
-                elif "imix" in item:
-                    anchor += "imix-"
-                elif "9000b" in item:
-                    anchor += "9000b-"
-                elif "1518" in item:
-                    anchor += "1518b-"
-
-                if "1t1c" in item:
-                    anchor += "1t1c"
-                elif "2t2c" in item:
-                    anchor += "2t2c"
-                elif "4t4c" in item:
-                    anchor += "4t4c"
-
-                url = url + file_name + anchor + feature
-
-                ref = ET.SubElement(td, "a", attrib=dict(href=url))
+            if c_idx == 0 and table.get(u"add-links", True):
+                ref = ET.SubElement(
+                    tdata,
+                    u"a",
+                    attrib=dict(
+                        href=f"{lnk_dir}"
+                        f"{_generate_url(table.get(u'testbed', ''), item)}"
+                        f"{lnk_sufix}"
+                    )
+                )
                 ref.text = item
+            else:
+                tdata.text = item
+    try:
+        with open(table[u"output-file"], u'w') as html_file:
+            logging.info(f"    Writing file: {table[u'output-file']}")
+            html_file.write(u".. raw:: html\n\n\t")
+            html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
+            html_file.write(u"\n\t<p><br><br></p>\n")
+    except KeyError:
+        logging.warning(u"The output file is not defined.")
+        return
+
+
+def table_comparison(table, input_data):
+    """Generate the table(s) with algorithm: table_comparison
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
 
-            if c_idx == 2:
-                if item == "regression":
-                    td.set("bgcolor", "#eca1a6")
-                elif item == "failure":
-                    td.set("bgcolor", "#d6cbd3")
-                elif item == "progression":
-                    td.set("bgcolor", "#bdcebe")
-            if c_idx > 0:
-                td.text = item
+    # Transform the data
+    logging.info(
+        f"    Creating the data set for the {table.get(u'type', u'')} "
+        f"{table.get(u'title', u'')}."
+    )
+
+    columns = table.get(u"columns", None)
+    if not columns:
+        logging.error(
+            f"No columns specified for {table.get(u'title', u'')}. Skipping."
+        )
+        return
+
+    cols = list()
+    for idx, col in enumerate(columns):
+        if col.get(u"data-set", None) is None:
+            logging.warning(f"No data for column {col.get(u'title', u'')}")
+            continue
+        tag = col.get(u"tag", None)
+        data = input_data.filter_data(
+            table,
+            params=[
+                u"throughput",
+                u"result",
+                u"latency",
+                u"name",
+                u"parent",
+                u"tags"
+            ],
+            data=col[u"data-set"],
+            continue_on_error=True
+        )
+        col_data = {
+            u"title": col.get(u"title", f"Column{idx}"),
+            u"data": dict()
+        }
+        for builds in data.values:
+            for build in builds:
+                for tst_name, tst_data in build.items():
+                    if tag and tag not in tst_data[u"tags"]:
+                        continue
+                    tst_name_mod = \
+                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
+                        replace(u"2n1l-", u"")
+                    if col_data[u"data"].get(tst_name_mod, None) is None:
+                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
+                        if u"across testbeds" in table[u"title"].lower() or \
+                                u"across topologies" in table[u"title"].lower():
+                            name = _tpc_modify_displayed_test_name(name)
+                        col_data[u"data"][tst_name_mod] = {
+                            u"name": name,
+                            u"replace": True,
+                            u"data": list(),
+                            u"mean": None,
+                            u"stdev": None
+                        }
+                    _tpc_insert_data(
+                        target=col_data[u"data"][tst_name_mod],
+                        src=tst_data,
+                        include_tests=table[u"include-tests"]
+                    )
+
+        replacement = col.get(u"data-replacement", None)
+        if replacement:
+            rpl_data = input_data.filter_data(
+                table,
+                params=[
+                    u"throughput",
+                    u"result",
+                    u"latency",
+                    u"name",
+                    u"parent",
+                    u"tags"
+                ],
+                data=replacement,
+                continue_on_error=True
+            )
+            for builds in rpl_data.values:
+                for build in builds:
+                    for tst_name, tst_data in build.items():
+                        if tag and tag not in tst_data[u"tags"]:
+                            continue
+                        tst_name_mod = \
+                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
+                            replace(u"2n1l-", u"")
+                        if col_data[u"data"].get(tst_name_mod, None) is None:
+                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
+                            if u"across testbeds" in table[u"title"].lower() \
+                                    or u"across topologies" in \
+                                    table[u"title"].lower():
+                                name = _tpc_modify_displayed_test_name(name)
+                            col_data[u"data"][tst_name_mod] = {
+                                u"name": name,
+                                u"replace": False,
+                                u"data": list(),
+                                u"mean": None,
+                                u"stdev": None
+                            }
+                        if col_data[u"data"][tst_name_mod][u"replace"]:
+                            col_data[u"data"][tst_name_mod][u"replace"] = False
+                            col_data[u"data"][tst_name_mod][u"data"] = list()
+                        _tpc_insert_data(
+                            target=col_data[u"data"][tst_name_mod],
+                            src=tst_data,
+                            include_tests=table[u"include-tests"]
+                        )
+
+        if table[u"include-tests"] in (u"NDR", u"PDR") or \
+                u"latency" in table[u"include-tests"]:
+            for tst_name, tst_data in col_data[u"data"].items():
+                if tst_data[u"data"]:
+                    tst_data[u"mean"] = mean(tst_data[u"data"])
+                    tst_data[u"stdev"] = stdev(tst_data[u"data"])
+
+        cols.append(col_data)
+
+    tbl_dict = dict()
+    for col in cols:
+        for tst_name, tst_data in col[u"data"].items():
+            if tbl_dict.get(tst_name, None) is None:
+                tbl_dict[tst_name] = {
+                    "name": tst_data[u"name"]
+                }
+            tbl_dict[tst_name][col[u"title"]] = {
+                u"mean": tst_data[u"mean"],
+                u"stdev": tst_data[u"stdev"]
+            }
+
+    if not tbl_dict:
+        logging.warning(f"No data for table {table.get(u'title', u'')}!")
+        return
+
+    tbl_lst = list()
+    for tst_data in tbl_dict.values():
+        row = [tst_data[u"name"], ]
+        for col in cols:
+            row.append(tst_data.get(col[u"title"], None))
+        tbl_lst.append(row)
+
+    comparisons = table.get(u"comparisons", None)
+    rcas = list()
+    if comparisons and isinstance(comparisons, list):
+        for idx, comp in enumerate(list(comparisons)):
+            try:
+                col_ref = int(comp[u"reference"])
+                col_cmp = int(comp[u"compare"])
+            except KeyError:
+                logging.warning(u"Comparison: No references defined! Skipping.")
+                comparisons.remove(comp)
+                continue
+            if col_ref == col_cmp or not (
+                    0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols)):
+                logging.warning(f"Wrong values of reference={col_ref} "
+                                f"and/or compare={col_cmp}. Skipping.")
+                comparisons.remove(comp)
+                continue
+            rca_file_name = comp.get(u"rca-file", None)
+            if rca_file_name:
+                try:
+                    with open(rca_file_name, u"r") as file_handler:
+                        rcas.append(
+                            {
+                                u"title": f"RCA{idx + 1}",
+                                u"data": load(file_handler, Loader=FullLoader)
+                            }
+                        )
+                except (YAMLError, IOError) as err:
+                    logging.warning(
+                        f"The RCA file {rca_file_name} does not exist or "
+                        f"it is corrupted!"
+                    )
+                    logging.debug(repr(err))
+                    rcas.append(None)
+            else:
+                rcas.append(None)
+    else:
+        comparisons = list()
+
+    tbl_cmp_lst = list()
+    if comparisons:
+        for row in tbl_lst:
+            new_row = deepcopy(row)
+            for comp in comparisons:
+                ref_itm = row[int(comp[u"reference"])]
+                if ref_itm is None and \
+                        comp.get(u"reference-alt", None) is not None:
+                    ref_itm = row[int(comp[u"reference-alt"])]
+                cmp_itm = row[int(comp[u"compare"])]
+                if ref_itm is not None and cmp_itm is not None and \
+                        ref_itm[u"mean"] is not None and \
+                        cmp_itm[u"mean"] is not None and \
+                        ref_itm[u"stdev"] is not None and \
+                        cmp_itm[u"stdev"] is not None:
+                    try:
+                        delta, d_stdev = relative_change_stdev(
+                            ref_itm[u"mean"], cmp_itm[u"mean"],
+                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
+                        )
+                    except ZeroDivisionError:
+                        break
+                    if delta is None or math.isnan(delta):
+                        break
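+                    # The delta and its stdev are scaled by 1e6 so that the
+                    # uniform "/ 1e6" applied when formatting all columns
+                    # below prints them back unscaled.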
+                    new_row.append({
+                        u"mean": delta * 1e6,
+                        u"stdev": d_stdev * 1e6
+                    })
+                else:
+                    break
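+            # This "else" belongs to the "for" above: the row is kept only
+            # when no comparison triggered "break".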
+            else:
+                tbl_cmp_lst.append(new_row)
 
     try:
-        with open(table["output-file"], 'w') as html_file:
-            logging.info("      Writing file: '{0}'".
-                         format(table["output-file"]))
-            html_file.write(".. raw:: html\n\n\t")
-            html_file.write(ET.tostring(dashboard))
-            html_file.write("\n\t<p><br><br></p>\n")
-    except KeyError:
-        logging.warning("The output file is not defined.")
+        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
+        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
+    except TypeError as err:
+        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
+
+    tbl_for_csv = list()
+    for line in tbl_cmp_lst:
+        row = [line[0], ]
+        for idx, itm in enumerate(line[1:]):
+            if itm is None or not isinstance(itm, dict) or\
+                    itm.get(u'mean', None) is None or \
+                    itm.get(u'stdev', None) is None:
+                row.append(u"NT")
+                row.append(u"NT")
+            else:
+                row.append(round(float(itm[u'mean']) / 1e6, 3))
+                row.append(round(float(itm[u'stdev']) / 1e6, 3))
+        for rca in rcas:
+            if rca is None:
+                continue
+            rca_nr = rca[u"data"].get(row[0], u"-")
+            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+        tbl_for_csv.append(row)
+
+    header_csv = [u"Test Case", ]
+    for col in cols:
+        header_csv.append(f"Avg({col[u'title']})")
+        header_csv.append(f"Stdev({col[u'title']})")
+    for comp in comparisons:
+        header_csv.append(
+            f"Avg({comp.get(u'title', u'')})"
+        )
+        header_csv.append(
+            f"Stdev({comp.get(u'title', u'')})"
+        )
+    for rca in rcas:
+        if rca:
+            header_csv.append(rca[u"title"])
+
+    legend_lst = table.get(u"legend", None)
+    if legend_lst is None:
+        legend = u""
+    else:
+        legend = u"\n" + u"\n".join(legend_lst) + u"\n"
+
+    footnote = u""
+    if rcas and any(rcas):
+        footnote += u"\nRoot Cause Analysis:\n"
+        for rca in rcas:
+            if rca:
+                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
+
+    csv_file_name = f"{table[u'output-file']}-csv.csv"
+    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
+        file_handler.write(
+            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
+        )
+        for test in tbl_for_csv:
+            file_handler.write(
+                u",".join([f'"{item}"' for item in test]) + u"\n"
+            )
+        if legend_lst:
+            for item in legend_lst:
+                file_handler.write(f'"{item}"\n')
+        if footnote:
+            for itm in footnote.split(u"\n"):
+                file_handler.write(f'"{itm}"\n')
+
+    tbl_tmp = list()
+    max_lens = [0, ] * len(tbl_cmp_lst[0])
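+    # Track the widest "±stdev" part per column so it can be right-aligned
+    # when building the final table below.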
+    for line in tbl_cmp_lst:
+        row = [line[0], ]
+        for idx, itm in enumerate(line[1:]):
+            if itm is None or not isinstance(itm, dict) or \
+                    itm.get(u'mean', None) is None or \
+                    itm.get(u'stdev', None) is None:
+                new_itm = u"NT"
+            else:
+                if idx < len(cols):
+                    new_itm = (
+                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
+                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
+                        replace(u"nan", u"NaN")
+                    )
+                else:
+                    new_itm = (
+                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
+                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
+                        replace(u"nan", u"NaN")
+                    )
+            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
+                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
+            row.append(new_itm)
+
+        tbl_tmp.append(row)
+
+    header = [u"Test Case", ]
+    header.extend([col[u"title"] for col in cols])
+    header.extend([comp.get(u"title", u"") for comp in comparisons])
+
+    tbl_final = list()
+    for line in tbl_tmp:
+        row = [line[0], ]
+        for idx, itm in enumerate(line[1:]):
+            if itm in (u"NT", u"NaN"):
+                row.append(itm)
+                continue
+            itm_lst = itm.rsplit(u"\u00B1", 1)
+            itm_lst[-1] = \
+                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
+            itm_str = u"\u00B1".join(itm_lst)
+
+            if idx >= len(cols):
+                # Diffs
+                rca = rcas[idx - len(cols)]
+                if rca:
+                    # Add rcas to diffs
+                    rca_nr = rca[u"data"].get(row[0], None)
+                    if rca_nr:
+                        hdr_len = len(header[idx + 1]) - 1
+                        if hdr_len < 19:
+                            hdr_len = 19
+                        rca_nr = f"[{rca_nr}]"
+                        itm_str = (
+                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
+                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
+                            f"{itm_str}"
+                        )
+            row.append(itm_str)
+        tbl_final.append(row)
+
+    # Generate csv tables:
+    csv_file_name = f"{table[u'output-file']}.csv"
+    logging.info(f"    Writing the file {csv_file_name}")
+    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
+        file_handler.write(u";".join(header) + u"\n")
+        for test in tbl_final:
+            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
+
+    # Generate txt table:
+    txt_file_name = f"{table[u'output-file']}.txt"
+    logging.info(f"    Writing the file {txt_file_name}")
+    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
+
+    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
+        file_handler.write(legend)
+        file_handler.write(footnote)
+
+    # Generate html table:
+    _tpc_generate_html_table(
+        header,
+        tbl_final,
+        table[u'output-file'],
+        legend=legend,
+        footnote=footnote,
+        sort_data=False,
+        title=table.get(u"title", u"")
+    )
+
+
+def table_weekly_comparison(table, in_data):
+    """Generate the table(s) with algorithm: table_weekly_comparison
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param in_data: Data to process.
+    :type table: pandas.Series
+    :type in_data: InputData
+    """
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
+    # Transform the data
+    logging.info(
+        f"    Creating the data set for the {table.get(u'type', u'')} "
+        f"{table.get(u'title', u'')}."
+    )
+
+    incl_tests = table.get(u"include-tests", None)
+    if incl_tests not in (u"NDR", u"PDR"):
+        logging.error(f"Wrong tests to include specified ({incl_tests}).")
+        return
+
+    nr_cols = table.get(u"nr-of-data-columns", None)
+    if not nr_cols or nr_cols < 2:
+        logging.error(
+            f"No columns specified for {table.get(u'title', u'')}. Skipping."
+        )
         return
+
+    data = in_data.filter_data(
+        table,
+        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+        continue_on_error=True
+    )
+
+    header = [
+        [u"VPP Version", ],
+        [u"Start Timestamp", ],
+        [u"CSIT Build", ],
+        [u"CSIT Testbed", ]
+    ]
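+    # One header row per metadata item; each processed build is insert()-ed
+    # at position 1 below, so later builds end up further left. Data columns
+    # are keyed by negative index (-1 for the first processed build) to
+    # mirror that ordering.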
+    tbl_dict = dict()
+    idx = 0
+    tb_tbl = table.get(u"testbeds", None)
+    for job_name, job_data in data.items():
+        for build_nr, build in job_data.items():
+            if idx >= nr_cols:
+                break
+            if build.empty:
+                continue
+
+            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
+            if tb_ip and tb_tbl:
+                testbed = tb_tbl.get(tb_ip, u"")
+            else:
+                testbed = u""
+            header[2].insert(1, build_nr)
+            header[3].insert(1, testbed)
+            header[1].insert(
+                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
+            )
+            header[0].insert(
+                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
+            )
+
+            for tst_name, tst_data in build.items():
+                tst_name_mod = \
+                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
+                if not tbl_dict.get(tst_name_mod, None):
+                    tbl_dict[tst_name_mod] = dict(
+                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
+                    )
+                try:
+                    tbl_dict[tst_name_mod][-idx - 1] = \
+                        tst_data[u"throughput"][incl_tests][u"LOWER"]
+                except (TypeError, IndexError, KeyError, ValueError):
+                    pass
+            idx += 1
+
+    if idx < nr_cols:
+        logging.error(u"Not enough data to build the table! Skipping")
+        return
+
+    cmp_dict = dict()
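+    # For each comparison, one relative-change value per test is collected;
+    # NaN marks tests missing data in either compared column.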
+    for idx, cmp in enumerate(table.get(u"comparisons", list())):
+        idx_ref = cmp.get(u"reference", None)
+        idx_cmp = cmp.get(u"compare", None)
+        if idx_ref is None or idx_cmp is None:
+            continue
+        header[0].append(
+            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
+            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
+        )
+        header[1].append(u"")
+        header[2].append(u"")
+        header[3].append(u"")
+        for tst_name, tst_data in tbl_dict.items():
+            if not cmp_dict.get(tst_name, None):
+                cmp_dict[tst_name] = list()
+            ref_data = tst_data.get(idx_ref, None)
+            cmp_data = tst_data.get(idx_cmp, None)
+            if ref_data is None or cmp_data is None:
+                cmp_dict[tst_name].append(float(u'nan'))
+            else:
+                cmp_dict[tst_name].append(
+                    relative_change(ref_data, cmp_data)
+                )
+
+    tbl_lst_none = list()
+    tbl_lst = list()
+    for tst_name, tst_data in tbl_dict.items():
+        itm_lst = [tst_data[u"name"], ]
+        for idx in range(nr_cols):
+            item = tst_data.get(-idx - 1, None)
+            if item is None:
+                itm_lst.insert(1, None)
+            else:
+                itm_lst.insert(1, round(item / 1e6, 1))
+        itm_lst.extend(
+            [
+                None if itm is None else round(itm, 1)
+                for itm in cmp_dict[tst_name]
+            ]
+        )
+        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
+            tbl_lst_none.append(itm_lst)
+        else:
+            tbl_lst.append(itm_lst)
+
+    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
+    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
+    tbl_lst.extend(tbl_lst_none)
+
+    # Generate csv table:
+    csv_file_name = f"{table[u'output-file']}.csv"
+    logging.info(f"    Writing the file {csv_file_name}")
+    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
+        for hdr in header:
+            file_handler.write(u",".join(hdr) + u"\n")
+        for test in tbl_lst:
+            file_handler.write(u",".join(
+                [
+                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
+                    replace(u"null", u"-") for item in test
+                ]
+            ) + u"\n")
+
+    txt_file_name = f"{table[u'output-file']}.txt"
+    logging.info(f"    Writing the file {txt_file_name}")
+    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
+
+    # Reorganize header in txt table
+    txt_table = list()
+    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
+        for line in list(file_handler):
+            txt_table.append(line)
+    try:
+        txt_table.insert(5, txt_table.pop(2))
+        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
+            file_handler.writelines(txt_table)
+    except IndexError:
+        pass
+
+    # Generate html table:
+    hdr_html = [
+        u"<br>".join(row) for row in zip(*header)
+    ]
+    _tpc_generate_html_table(
+        hdr_html,
+        tbl_lst,
+        table[u'output-file'],
+        sort_data=True,
+        title=table.get(u"title", u""),
+        generate_rst=False
+    )