CSIT-1340: Fix the list of failed tests in Trending
 csit.git: resources/tools/presentation/generator_tables.py
index 40eda7b..7590daa 100644
 
 import logging
 import csv
-import pandas as pd
+import re
 
 from string import replace
 from collections import OrderedDict
 from numpy import nan, isnan
 from xml.etree import ElementTree as ET
+from datetime import datetime as dt
+from datetime import timedelta
 
-from errors import PresentationError
 from utils import mean, stdev, relative_change, classify_anomalies, \
     convert_csv_to_pretty_txt
 
 
+REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
+
+
 def generate_tables(spec, data):
     """Generate all tables specified in the specification file.
 
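Note: the new REGEX_NIC pattern is used further down to prefix dashboard entries with the NIC taken from the parent suite name. A minimal sketch of what it matches, assuming parent names of the usual "<speed>ge<ports>p<slot><nic>" form (the sample names below are made up):

    import re

    REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')

    # Made-up parent names of the assumed form; only the NIC token is kept.
    for parent in ("10ge2p1x520-ethip4-ip4base",
                   "40ge2p1xl710-eth-l2xcbase",
                   "25ge2p1xxv710-eth-l2bdbasemaclrn"):
        match = re.search(REGEX_NIC, parent)
        print(match.group(0) if match else "no NIC found")
    # -> 10ge2p1x520, 40ge2p1xl710, 25ge2p1xxv710
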
@@ -181,175 +185,6 @@ def table_merged_details(table, input_data):
     logging.info("  Done.")
 
 
-def table_performance_improvements(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_improvements
-    specified in the specification file.
-
-    :param table: Table to generate.
-    :param input_data: Data to process.
-    :type table: pandas.Series
-    :type input_data: InputData
-    """
-
-    def _write_line_to_file(file_handler, data):
-        """Write a line to the .csv file.
-
-        :param file_handler: File handler for the csv file. It must be open for
-         writing text.
-        :param data: Item to be written to the file.
-        :type file_handler: BinaryIO
-        :type data: list
-        """
-
-        line_lst = list()
-        for item in data:
-            if isinstance(item["data"], str):
-                # Remove -?drdisc from the end
-                if item["data"].endswith("drdisc"):
-                    item["data"] = item["data"][:-8]
-                line_lst.append(item["data"])
-            elif isinstance(item["data"], float):
-                line_lst.append("{:.1f}".format(item["data"]))
-            elif item["data"] is None:
-                line_lst.append("")
-        file_handler.write(",".join(line_lst) + "\n")
-
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
-
-    # Read the template
-    file_name = table.get("template", None)
-    if file_name:
-        try:
-            tmpl = _read_csv_template(file_name)
-        except PresentationError:
-            logging.error("  The template '{0}' does not exist. Skipping the "
-                          "table.".format(file_name))
-            return None
-    else:
-        logging.error("The template is not defined. Skipping the table.")
-        return None
-
-    # Transform the data
-    logging.info("    Creating the data set for the {0} '{1}'.".
-                 format(table.get("type", ""), table.get("title", "")))
-    data = input_data.filter_data(table)
-
-    # Prepare the header of the tables
-    header = list()
-    for column in table["columns"]:
-        header.append(column["title"])
-
-    # Generate the data for the table according to the model in the table
-    # specification
-    tbl_lst = list()
-    for tmpl_item in tmpl:
-        tbl_item = list()
-        for column in table["columns"]:
-            cmd = column["data"].split(" ")[0]
-            args = column["data"].split(" ")[1:]
-            if cmd == "template":
-                try:
-                    val = float(tmpl_item[int(args[0])])
-                except ValueError:
-                    val = tmpl_item[int(args[0])]
-                tbl_item.append({"data": val})
-            elif cmd == "data":
-                jobs = args[0:-1]
-                operation = args[-1]
-                data_lst = list()
-                for job in jobs:
-                    for build in data[job]:
-                        try:
-                            data_lst.append(float(build[tmpl_item[0]]
-                                                  ["throughput"]["value"]))
-                        except (KeyError, TypeError):
-                            # No data, ignore
-                            continue
-                if data_lst:
-                    tbl_item.append({"data": (eval(operation)(data_lst)) /
-                                             1000000})
-                else:
-                    tbl_item.append({"data": None})
-            elif cmd == "operation":
-                operation = args[0]
-                try:
-                    nr1 = float(tbl_item[int(args[1])]["data"])
-                    nr2 = float(tbl_item[int(args[2])]["data"])
-                    if nr1 and nr2:
-                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
-                    else:
-                        tbl_item.append({"data": None})
-                except (IndexError, ValueError, TypeError):
-                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
-                    tbl_item.append({"data": None})
-                    continue
-            else:
-                logging.error("Not supported command {0}. Skipping the table.".
-                              format(cmd))
-                return None
-        tbl_lst.append(tbl_item)
-
-    # Sort the table according to the relative change
-    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
-
-    # Create the tables and write them to the files
-    file_names = [
-        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
-    ]
-
-    for file_name in file_names:
-        logging.info("    Writing the file '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(",".join(header) + "\n")
-            for item in tbl_lst:
-                if isinstance(item[-1]["data"], float):
-                    rel_change = round(item[-1]["data"], 1)
-                else:
-                    rel_change = item[-1]["data"]
-                if "ndr_top" in file_name \
-                        and "ndr" in item[0]["data"] \
-                        and rel_change >= 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "pdr_top" in file_name \
-                        and "pdr" in item[0]["data"] \
-                        and rel_change >= 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "ndr_low" in file_name \
-                        and "ndr" in item[0]["data"] \
-                        and rel_change < 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "pdr_low" in file_name \
-                        and "pdr" in item[0]["data"] \
-                        and rel_change < 10.0:
-                    _write_line_to_file(file_handler, item)
-
-    logging.info("  Done.")
-
-
-def _read_csv_template(file_name):
-    """Read the template from a .csv file.
-
-    :param file_name: Name / full path / relative path of the file to read.
-    :type file_name: str
-    :returns: Data from the template as list (lines) of lists (items on line).
-    :rtype: list
-    :raises: PresentationError if it is not possible to read the file.
-    """
-
-    try:
-        with open(file_name, 'r') as csv_file:
-            tmpl_data = list()
-            for line in csv_file:
-                tmpl_data.append(line[:-1].split(","))
-        return tmpl_data
-    except IOError as err:
-        raise PresentationError(str(err), level="ERROR")
-
-
 def table_performance_comparison(table, input_data):
     """Generate the table(s) with algorithm: table_performance_comparison
     specified in the specification file.
@@ -372,18 +207,23 @@ def table_performance_comparison(table, input_data):
     try:
         header = ["Test case", ]
 
+        if table["include-tests"] == "MRR":
+            hdr_param = "Receive Rate"
+        else:
+            hdr_param = "Throughput"
+
         history = table.get("history", None)
         if history:
             for item in history:
                 header.extend(
-                    ["{0} Throughput [Mpps]".format(item["title"]),
+                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                      "{0} Stdev [Mpps]".format(item["title"])])
         header.extend(
-            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
+            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
-             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
-             "Change [%]"])
+             "Delta [%]"])
         header_str = ",".join(header) + "\n"
     except (AttributeError, KeyError) as err:
         logging.error("The model is invalid, missing parameter: {0}".
@@ -395,45 +235,137 @@ def table_performance_comparison(table, input_data):
     for job, builds in table["reference"]["data"].items():
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].iteritems():
-                if tbl_dict.get(tst_name, None) is None:
+                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
+                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
+                    replace("-ndrdisc", "").replace("-pdr", "").\
+                    replace("-ndr", "").\
+                    replace("1t1c", "1c").replace("2t1c", "1c").\
+                    replace("2t2c", "2c").replace("4t2c", "2c").\
+                    replace("4t4c", "4c").replace("8t4c", "4c")
+                if "across topologies" in table["title"].lower():
+                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
+                if tbl_dict.get(tst_name_mod, None) is None:
                     name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                             "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "ref-data": list(),
-                                          "cmp-data": list()}
+                                                     split("-")[:-1]))
+                    if "across testbeds" in table["title"].lower() or \
+                            "across topologies" in table["title"].lower():
+                        name = name.\
+                            replace("1t1c", "1c").replace("2t1c", "1c").\
+                            replace("2t2c", "2c").replace("4t2c", "2c").\
+                            replace("4t4c", "4c").replace("8t4c", "4c")
+                    tbl_dict[tst_name_mod] = {"name": name,
+                                              "ref-data": list(),
+                                              "cmp-data": list()}
                 try:
-                    tbl_dict[tst_name]["ref-data"].\
-                        append(tst_data["throughput"]["value"])
+                    # TODO: Re-work when NDRPDRDISC tests are not used
+                    if table["include-tests"] == "MRR":
+                        tbl_dict[tst_name_mod]["ref-data"]. \
+                            append(tst_data["result"]["receive-rate"].avg)
+                    elif table["include-tests"] == "PDR":
+                        if tst_data["type"] == "PDR":
+                            tbl_dict[tst_name_mod]["ref-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["ref-data"].append(
+                                tst_data["throughput"]["PDR"]["LOWER"])
+                    elif table["include-tests"] == "NDR":
+                        if tst_data["type"] == "NDR":
+                            tbl_dict[tst_name_mod]["ref-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["ref-data"].append(
+                                tst_data["throughput"]["NDR"]["LOWER"])
+                    else:
+                        continue
                 except TypeError:
                     pass  # No data in output.xml for this test
 
     for job, builds in table["compare"]["data"].items():
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
+                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
+                    replace("-ndrdisc", "").replace("-pdr", ""). \
+                    replace("-ndr", "").\
+                    replace("1t1c", "1c").replace("2t1c", "1c").\
+                    replace("2t2c", "2c").replace("4t2c", "2c").\
+                    replace("4t4c", "4c").replace("8t4c", "4c")
+                if "across topologies" in table["title"].lower():
+                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                 try:
-                    tbl_dict[tst_name]["cmp-data"].\
-                        append(tst_data["throughput"]["value"])
+                    # TODO: Re-work when NDRPDRDISC tests are not used
+                    if table["include-tests"] == "MRR":
+                        tbl_dict[tst_name_mod]["cmp-data"]. \
+                            append(tst_data["result"]["receive-rate"].avg)
+                    elif table["include-tests"] == "PDR":
+                        if tst_data["type"] == "PDR":
+                            tbl_dict[tst_name_mod]["cmp-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["cmp-data"].append(
+                                tst_data["throughput"]["PDR"]["LOWER"])
+                    elif table["include-tests"] == "NDR":
+                        if tst_data["type"] == "NDR":
+                            tbl_dict[tst_name_mod]["cmp-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["cmp-data"].append(
+                                tst_data["throughput"]["NDR"]["LOWER"])
+                    else:
+                        continue
                 except KeyError:
                     pass
                 except TypeError:
-                    tbl_dict.pop(tst_name, None)
+                    tbl_dict.pop(tst_name_mod, None)
     if history:
         for item in history:
             for job, builds in item["data"].items():
                 for build in builds:
                     for tst_name, tst_data in data[job][str(build)].iteritems():
-                        if tbl_dict.get(tst_name, None) is None:
+                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
+                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
+                            replace("-ndrdisc", "").replace("-pdr", ""). \
+                            replace("-ndr", "").\
+                            replace("1t1c", "1c").replace("2t1c", "1c").\
+                            replace("2t2c", "2c").replace("4t2c", "2c").\
+                            replace("4t4c", "4c").replace("8t4c", "4c")
+                        if "across topologies" in table["title"].lower():
+                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
+                        if tbl_dict.get(tst_name_mod, None) is None:
                             continue
-                        if tbl_dict[tst_name].get("history", None) is None:
-                            tbl_dict[tst_name]["history"] = OrderedDict()
-                        if tbl_dict[tst_name]["history"].get(item["title"],
+                        if tbl_dict[tst_name_mod].get("history", None) is None:
+                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
+                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                              None) is None:
-                            tbl_dict[tst_name]["history"][item["title"]] = \
+                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                 list()
                         try:
-                            tbl_dict[tst_name]["history"][item["title"]].\
-                                append(tst_data["throughput"]["value"])
+                            # TODO: Re-work when NDRPDRDISC tests are not used
+                            if table["include-tests"] == "MRR":
+                                tbl_dict[tst_name_mod]["history"][item["title"
+                                ]].append(tst_data["result"]["receive-rate"].
+                                          avg)
+                            elif table["include-tests"] == "PDR":
+                                if tst_data["type"] == "PDR":
+                                    tbl_dict[tst_name_mod]["history"][
+                                        item["title"]].\
+                                        append(tst_data["throughput"]["value"])
+                                elif tst_data["type"] == "NDRPDR":
+                                    tbl_dict[tst_name_mod]["history"][item[
+                                        "title"]].append(tst_data["throughput"][
+                                        "PDR"]["LOWER"])
+                            elif table["include-tests"] == "NDR":
+                                if tst_data["type"] == "NDR":
+                                    tbl_dict[tst_name_mod]["history"][
+                                        item["title"]].\
+                                        append(tst_data["throughput"]["value"])
+                                elif tst_data["type"] == "NDRPDR":
+                                    tbl_dict[tst_name_mod]["history"][item[
+                                        "title"]].append(tst_data["throughput"][
+                                        "NDR"]["LOWER"])
+                            else:
+                                continue
                         except (TypeError, KeyError):
                             pass
 
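For readability, the two transformations this hunk repeats for the reference, compare and history data sets can be summarized as below. The helpers are hypothetical (the patch inlines the logic), but they mirror it one to one:

    def _mod_test_name(tst_name, across_topologies=False):
        # Strip the test-type suffix and collapse thread/core tags so that
        # runs from different testbeds/topologies map onto the same key.
        tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
            replace("-ndrpdr", "").replace("-pdrdisc", "").\
            replace("-ndrdisc", "").replace("-pdr", "").\
            replace("-ndr", "").\
            replace("1t1c", "1c").replace("2t1c", "1c").\
            replace("2t2c", "2c").replace("4t2c", "2c").\
            replace("4t4c", "4c").replace("8t4c", "4c")
        if across_topologies:
            tst_name_mod = tst_name_mod.replace("2n1l-", "")
        return tst_name_mod

    def _get_rate(tst_data, include_tests):
        # Pick the value to aggregate: MRR uses the average receive rate,
        # PDR/NDR use the measured throughput; NDRPDR results expose the
        # lower bound of the respective interval.
        if include_tests == "MRR":
            return tst_data["result"]["receive-rate"].avg
        if include_tests == "PDR":
            if tst_data["type"] == "PDR":
                return tst_data["throughput"]["value"]
            if tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["PDR"]["LOWER"]
        if include_tests == "NDR":
            if tst_data["type"] == "NDR":
                return tst_data["throughput"]["value"]
            if tst_data["type"] == "NDRPDR":
                return tst_data["throughput"]["NDR"]["LOWER"]
        return None

    print(_mod_test_name("tc01-64b-2t1c-eth-l2xcbase-ndrpdr"))
    # -> tc01-64b-1c-eth-l2xcbase
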
@@ -470,213 +402,14 @@ def table_performance_comparison(table, input_data):
     # Sort the table according to the relative change
     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
 
-    # Generate tables:
-    # All tests in csv:
-    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-                 ]
-    for file_name in tbl_names:
-        logging.info("      Writing file: '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(header_str)
-            for test in tbl_lst:
-                if (file_name.split("-")[-3] in test[0] and    # NDR vs PDR
-                        file_name.split("-")[-2] in test[0]):  # cores
-                    test[0] = "-".join(test[0].split("-")[:-1])
-                    file_handler.write(",".join([str(item) for item in test]) +
-                                       "\n")
-
-    # All tests in txt:
-    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
-                     ]
-
-    for i, txt_name in enumerate(tbl_names_txt):
-        logging.info("      Writing file: '{0}'".format(txt_name))
-        convert_csv_to_pretty_txt(tbl_names[i], txt_name)
-
-    # Selected tests in csv:
-    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    with open(input_file, "r") as in_file:
-        lines = list()
-        for line in in_file:
-            lines.append(line)
-
-    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[1:]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
-                                                  table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[-1:0:-1]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    with open(input_file, "r") as in_file:
-        lines = list()
-        for line in in_file:
-            lines.append(line)
-
-    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[1:]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
-                                                  table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[-1:0:-1]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-
-def table_performance_comparison_mrr(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_comparison_mrr
-    specified in the specification file.
-
-    :param table: Table to generate.
-    :param input_data: Data to process.
-    :type table: pandas.Series
-    :type input_data: InputData
-    """
-
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
-
-    # Transform the data
-    logging.info("    Creating the data set for the {0} '{1}'.".
-                 format(table.get("type", ""), table.get("title", "")))
-    data = input_data.filter_data(table, continue_on_error=True)
-
-    # Prepare the header of the tables
-    try:
-        header = ["Test case",
-                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
-                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
-                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
-                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
-                  "Change [%]"]
-        header_str = ",".join(header) + "\n"
-    except (AttributeError, KeyError) as err:
-        logging.error("The model is invalid, missing parameter: {0}".
-                      format(err))
-        return
-
-    # Prepare data to the table:
-    tbl_dict = dict()
-    for job, builds in table["reference"]["data"].items():
-        for build in builds:
-            for tst_name, tst_data in data[job][str(build)].iteritems():
-                if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "ref-data": list(),
-                                          "cmp-data": list()}
-                try:
-                    tbl_dict[tst_name]["ref-data"].\
-                        append(tst_data["result"]["throughput"])
-                except TypeError:
-                    pass  # No data in output.xml for this test
-
-    for job, builds in table["compare"]["data"].items():
-        for build in builds:
-            for tst_name, tst_data in data[job][str(build)].iteritems():
-                try:
-                    tbl_dict[tst_name]["cmp-data"].\
-                        append(tst_data["result"]["throughput"])
-                except KeyError:
-                    pass
-                except TypeError:
-                    tbl_dict.pop(tst_name, None)
-
-    tbl_lst = list()
-    for tst_name in tbl_dict.keys():
-        item = [tbl_dict[tst_name]["name"], ]
-        data_t = tbl_dict[tst_name]["ref-data"]
-        if data_t:
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
-        else:
-            item.extend([None, None])
-        data_t = tbl_dict[tst_name]["cmp-data"]
-        if data_t:
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
-        else:
-            item.extend([None, None])
-        if item[1] is not None and item[3] is not None and item[1] != 0:
-            item.append(int(relative_change(float(item[1]), float(item[3]))))
-        if len(item) == 6:
-            tbl_lst.append(item)
-
-    # Sort the table according to the relative change
-    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
 
-    # Generate tables:
-    # All tests in csv:
-    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"]),
-                 "{0}-2t2c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"]),
-                 "{0}-4t4c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"])
-                 ]
-    for file_name in tbl_names:
-        logging.info("      Writing file: '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(header_str)
-            for test in tbl_lst:
-                if file_name.split("-")[-2] in test[0]:  # cores
-                    test[0] = "-".join(test[0].split("-")[:-1])
-                    file_handler.write(",".join([str(item) for item in test]) +
-                                       "\n")
-
-    # All tests in txt:
-    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-4t4c-full.txt".format(table["output-file"])
-                     ]
-
-    for i, txt_name in enumerate(tbl_names_txt):
-        logging.info("      Writing file: '{0}'".format(txt_name))
-        convert_csv_to_pretty_txt(tbl_names[i], txt_name)
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
 
 
 def table_performance_trending_dashboard(table, input_data):
@@ -716,28 +449,30 @@ def table_performance_trending_dashboard(table, input_data):
                 if tst_name.lower() in table["ignore-list"]:
                     continue
                 if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "data": OrderedDict()}
+                    groups = re.search(REGEX_NIC, tst_data["parent"])
+                    if not groups:
+                        continue
+                    nic = groups.group(0)
+                    tbl_dict[tst_name] = {
+                        "name": "{0}-{1}".format(nic, tst_data["name"]),
+                        "data": OrderedDict()}
                 try:
-                    tbl_dict[tst_name]["data"][str(build)] =  \
-                        tst_data["result"]["throughput"]
+                    tbl_dict[tst_name]["data"][str(build)] = \
+                        tst_data["result"]["receive-rate"]
                 except (TypeError, KeyError):
                     pass  # No data in output.xml for this test
 
     tbl_lst = list()
     for tst_name in tbl_dict.keys():
-        if len(tbl_dict[tst_name]["data"]) < 2:
+        data_t = tbl_dict[tst_name]["data"]
+        if len(data_t) < 2:
             continue
 
-        data_t = pd.Series(tbl_dict[tst_name]["data"])
-
         classification_lst, avgs = classify_anomalies(data_t)
 
-        win_size = min(data_t.size, table["window"])
-        long_win_size = min(data_t.size, table["long-trend-window"])
+        win_size = min(len(data_t), table["window"])
+        long_win_size = min(len(data_t), table["long-trend-window"])
+
         try:
             max_long_avg = max(
                 [x for x in avgs[-long_win_size:-win_size]
@@ -794,12 +529,14 @@ def table_performance_trending_dashboard(table, input_data):
     convert_csv_to_pretty_txt(file_name, txt_file_name)
 
 
-def _generate_url(base, test_name):
+def _generate_url(base, testbed, test_name):
     """Generate URL to a trending plot from the name of the test case.
 
     :param base: The base part of URL common to all test cases.
+    :param testbed: The testbed used for testing.
     :param test_name: The name of the test case.
     :type base: str
+    :type testbed: str
     :type test_name: str
     :returns: The URL to the plot with the trending data for the given test
         case.
@@ -808,65 +545,105 @@ def _generate_url(base, test_name):
 
     url = base
     file_name = ""
-    anchor = "#"
+    anchor = ".html#"
     feature = ""
 
     if "lbdpdk" in test_name or "lbvpp" in test_name:
-        file_name = "link_bonding.html"
+        file_name = "link_bonding"
+
+    elif "114b" in test_name and "vhost" in test_name:
+        file_name = "vts"
 
     elif "testpmd" in test_name or "l3fwd" in test_name:
-        file_name = "dpdk.html"
+        file_name = "dpdk"
 
     elif "memif" in test_name:
-        file_name = "container_memif.html"
+        file_name = "container_memif"
+        feature = "-base"
 
     elif "srv6" in test_name:
-        file_name = "srv6.html"
+        file_name = "srv6"
 
     elif "vhost" in test_name:
         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
-            file_name = "vm_vhost_l2.html"
+            file_name = "vm_vhost_l2"
+            if "114b" in test_name:
+                feature = ""
+            elif "l2xcbase" in test_name:
+                feature = "-base-l2xc"
+            elif "l2bdbasemaclrn" in test_name:
+                feature = "-base-l2bd"
+            else:
+                feature = "-base"
         elif "ip4base" in test_name:
-            file_name = "vm_vhost_ip4.html"
+            file_name = "vm_vhost_ip4"
+            feature = "-base"
 
     elif "ipsec" in test_name:
-        file_name = "ipsec.html"
+        file_name = "ipsec"
+        feature = "-base-scale"
 
     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
-        file_name = "ip4_tunnels.html"
+        file_name = "ip4_tunnels"
+        feature = "-base"
 
     elif "ip4base" in test_name or "ip4scale" in test_name:
-        file_name = "ip4.html"
-        if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
+        file_name = "ip4"
+        if "xl710" in test_name:
+            feature = "-base-scale-features"
+        elif "iacl" in test_name:
+            feature = "-features-iacl"
+        elif "oacl" in test_name:
+            feature = "-features-oacl"
+        elif "snat" in test_name or "cop" in test_name:
             feature = "-features"
+        else:
+            feature = "-base-scale"
 
     elif "ip6base" in test_name or "ip6scale" in test_name:
-        file_name = "ip6.html"
+        file_name = "ip6"
+        feature = "-base-scale"
 
     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
-        file_name = "l2.html"
-        if "iacl" in test_name:
-            feature = "-features"
+        file_name = "l2"
+        if "macip" in test_name:
+            feature = "-features-macip"
+        elif "iacl" in test_name:
+            feature = "-features-iacl"
+        elif "oacl" in test_name:
+            feature = "-features-oacl"
+        else:
+            feature = "-base-scale"
 
     if "x520" in test_name:
-        anchor += "x520-"
+        nic = "x520-"
     elif "x710" in test_name:
-        anchor += "x710-"
+        nic = "x710-"
     elif "xl710" in test_name:
-        anchor += "xl710-"
+        nic = "xl710-"
+    elif "xxv710" in test_name:
+        nic = "xxv710-"
+    else:
+        nic = ""
+    anchor += nic
 
     if "64b" in test_name:
-        anchor += "64b-"
+        framesize = "64b"
     elif "78b" in test_name:
-        anchor += "78b-"
+        framesize = "78b"
     elif "imix" in test_name:
-        anchor += "imix-"
+        framesize = "imix"
     elif "9000b" in test_name:
-        anchor += "9000b-"
-    elif "1518" in test_name:
-        anchor += "1518b-"
+        framesize = "9000b"
+    elif "1518b" in test_name:
+        framesize = "1518b"
+    elif "114b" in test_name:
+        framesize = "114b"
+    else:
+        framesize = ""
+    anchor += framesize + '-'
 
     if "1t1c" in test_name:
         anchor += "1t1c"
@@ -874,8 +651,15 @@ def _generate_url(base, test_name):
         anchor += "2t2c"
     elif "4t4c" in test_name:
         anchor += "4t4c"
+    elif "2t1c" in test_name:
+        anchor += "2t1c"
+    elif "4t2c" in test_name:
+        anchor += "4t2c"
+    elif "8t4c" in test_name:
+        anchor += "8t4c"
 
-    return url + file_name + anchor + feature
+    return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
+           anchor + feature
 
 
 def table_performance_trending_dashboard_html(table, input_data):
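Worked example of the reworked URL composition (the testbed string and the test name are illustrative inputs; the resulting path assumes the trending pages follow the "<area>-<testbed>-<nic>-<framesize>-<feature>.html" naming built above):

    # Illustrative call; "3n-hsw" and the test name are made-up inputs.
    url = _generate_url("../trending/", "3n-hsw",
                        "10ge2p1x520-64b-1t1c-eth-l2xcbase-ndrpdr-mrr")
    # -> "../trending/l2-3n-hsw-x520-64b-base-scale.html#x520-64b-1t1c-base-scale"
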
@@ -885,10 +669,16 @@ def table_performance_trending_dashboard_html(table, input_data):
 
     :param table: Table to generate.
     :param input_data: Data to process.
-    :type table: pandas.Series
+    :type table: dict
     :type input_data: InputData
     """
 
+    testbed = table.get("testbed", None)
+    if testbed is None:
+        logging.error("The testbed is not defined for the table '{0}'.".
+                      format(table.get("title", "")))
+        return
+
     logging.info("  Generating the table {0} ...".
                  format(table.get("title", "")))
 
@@ -934,7 +724,7 @@ def table_performance_trending_dashboard_html(table, input_data):
             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
             # Name:
             if c_idx == 0:
-                url = _generate_url("../trending/", item)
+                url = _generate_url("../trending/", testbed, item)
                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
                 ref.text = item
             else:
@@ -977,6 +767,10 @@ def table_failed_tests(table, input_data):
 
     # Generate the data for the table according to the model in the table
     # specification
+
+    now = dt.utcnow()
+    timeperiod = timedelta(int(table.get("window", 7)))
+
     tbl_dict = dict()
     for job, builds in table["data"].items():
         for build in builds:
@@ -985,25 +779,32 @@ def table_failed_tests(table, input_data):
                 if tst_name.lower() in table["ignore-list"]:
                     continue
                 if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "data": OrderedDict()}
+                    groups = re.search(REGEX_NIC, tst_data["parent"])
+                    if not groups:
+                        continue
+                    nic = groups.group(0)
+                    tbl_dict[tst_name] = {
+                        "name": "{0}-{1}".format(nic, tst_data["name"]),
+                        "data": OrderedDict()}
                 try:
-                    tbl_dict[tst_name]["data"][build] = (
-                        tst_data["status"],
-                        input_data.metadata(job, build).get("generated", ""),
-                        input_data.metadata(job, build).get("version", ""),
-                        build)
+                    generated = input_data.metadata(job, build).\
+                        get("generated", "")
+                    if not generated:
+                        continue
+                    then = dt.strptime(generated, "%Y%m%d %H:%M")
+                    if (now - then) <= timeperiod:
+                        tbl_dict[tst_name]["data"][build] = (
+                            tst_data["status"],
+                            generated,
+                            input_data.metadata(job, build).get("version", ""),
+                            build)
                 except (TypeError, KeyError):
                     pass  # No data in output.xml for this test
 
     tbl_lst = list()
     for tst_data in tbl_dict.values():
-        win_size = min(len(tst_data["data"]), table["window"])
         fails_nr = 0
-        for val in tst_data["data"].values()[-win_size:]:
+        for val in tst_data["data"].values():
             if val[0] == "FAIL":
                 fails_nr += 1
                 fails_last_date = val[1]
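
The core of the CSIT-1340 fix is above: instead of counting failures over a fixed number of the most recent builds, the dashboard now keeps only builds whose metadata timestamp falls inside the configured time window (7 days by default) and counts failures over those. A minimal sketch of the date filter, with a made-up "generated" value (the strptime format is the one used in the patch):

    from datetime import datetime as dt
    from datetime import timedelta

    now = dt.utcnow()
    timeperiod = timedelta(7)        # table.get("window", 7) in the patch

    generated = "20181001 14:30"     # made-up metadata timestamp
    then = dt.strptime(generated, "%Y%m%d %H:%M")
    if (now - then) <= timeperiod:
        print("build is within the window, its tests are counted")
    else:
        print("build is too old, it is skipped")
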
@@ -1044,6 +845,12 @@ def table_failed_tests_html(table, input_data):
     :type input_data: InputData
     """
 
+    testbed = table.get("testbed", None)
+    if testbed is None:
+        logging.error("The testbed is not defined for the table '{0}'.".
+                      format(table.get("title", "")))
+        return
+
     logging.info("  Generating the table {0} ...".
                  format(table.get("title", "")))
 
@@ -1081,7 +888,7 @@ def table_failed_tests_html(table, input_data):
             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
             # Name:
             if c_idx == 0:
-                url = _generate_url("../trending/", item)
+                url = _generate_url("../trending/", testbed, item)
                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
                 ref.text = item
             else: