CSIT-1504: Soak tests - box plots
resources/tools/presentation/generator_tables.py
index d42c734..f4c3e54 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 import logging
 import csv
+import re
 
 from string import replace
 from collections import OrderedDict
 from numpy import nan, isnan
 from xml.etree import ElementTree as ET
+from datetime import datetime as dt
+from datetime import timedelta
 
-from errors import PresentationError
 from utils import mean, stdev, relative_change, classify_anomalies, \
     convert_csv_to_pretty_txt
 
 
+REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
+
+
 def generate_tables(spec, data):
     """Generate all tables specified in the specification file.
 
@@ -91,7 +96,7 @@ def table_details(table, input_data):
                     try:
                         col_data = str(data[job][build][test][column["data"].
                                        split(" ")[1]]).replace('"', '""')
-                        if column["data"].split(" ")[1] in ("vat-history",
+                        if column["data"].split(" ")[1] in ("conf-history",
                                                             "show-run"):
                             col_data = replace(col_data, " |br| ", "",
                                                maxreplace=1)
@@ -156,7 +161,9 @@ def table_merged_details(table, input_data):
                     try:
                         col_data = str(data[test][column["data"].
                                        split(" ")[1]]).replace('"', '""')
-                        if column["data"].split(" ")[1] in ("vat-history",
+                        col_data = replace(col_data, "No Data",
+                                           "Not Captured     ")
+                        if column["data"].split(" ")[1] in ("conf-history",
                                                             "show-run"):
                             col_data = replace(col_data, " |br| ", "",
                                                maxreplace=1)
@@ -164,7 +171,7 @@ def table_merged_details(table, input_data):
                                 format(col_data[:-5])
                         row_lst.append('"{0}"'.format(col_data))
                     except KeyError:
-                        row_lst.append("No data")
+                        row_lst.append('"Not captured"')
                 table_lst.append(row_lst)
 
         # Write the data to file
@@ -180,177 +187,6 @@ def table_merged_details(table, input_data):
     logging.info("  Done.")
 
 
-def table_performance_improvements(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_improvements
-    specified in the specification file.
-
-    # FIXME: Not used now.
-
-    :param table: Table to generate.
-    :param input_data: Data to process.
-    :type table: pandas.Series
-    :type input_data: InputData
-    """
-
-    def _write_line_to_file(file_handler, data):
-        """Write a line to the .csv file.
-
-        :param file_handler: File handler for the csv file. It must be open for
-         writing text.
-        :param data: Item to be written to the file.
-        :type file_handler: BinaryIO
-        :type data: list
-        """
-
-        line_lst = list()
-        for item in data:
-            if isinstance(item["data"], str):
-                # Remove -?drdisc from the end
-                if item["data"].endswith("drdisc"):
-                    item["data"] = item["data"][:-8]
-                line_lst.append(item["data"])
-            elif isinstance(item["data"], float):
-                line_lst.append("{:.1f}".format(item["data"]))
-            elif item["data"] is None:
-                line_lst.append("")
-        file_handler.write(",".join(line_lst) + "\n")
-
-    logging.info("  Generating the table {0} ...".
-                 format(table.get("title", "")))
-
-    # Read the template
-    file_name = table.get("template", None)
-    if file_name:
-        try:
-            tmpl = _read_csv_template(file_name)
-        except PresentationError:
-            logging.error("  The template '{0}' does not exist. Skipping the "
-                          "table.".format(file_name))
-            return None
-    else:
-        logging.error("The template is not defined. Skipping the table.")
-        return None
-
-    # Transform the data
-    logging.info("    Creating the data set for the {0} '{1}'.".
-                 format(table.get("type", ""), table.get("title", "")))
-    data = input_data.filter_data(table)
-
-    # Prepare the header of the tables
-    header = list()
-    for column in table["columns"]:
-        header.append(column["title"])
-
-    # Generate the data for the table according to the model in the table
-    # specification
-    tbl_lst = list()
-    for tmpl_item in tmpl:
-        tbl_item = list()
-        for column in table["columns"]:
-            cmd = column["data"].split(" ")[0]
-            args = column["data"].split(" ")[1:]
-            if cmd == "template":
-                try:
-                    val = float(tmpl_item[int(args[0])])
-                except ValueError:
-                    val = tmpl_item[int(args[0])]
-                tbl_item.append({"data": val})
-            elif cmd == "data":
-                jobs = args[0:-1]
-                operation = args[-1]
-                data_lst = list()
-                for job in jobs:
-                    for build in data[job]:
-                        try:
-                            data_lst.append(float(build[tmpl_item[0]]
-                                                  ["throughput"]["value"]))
-                        except (KeyError, TypeError):
-                            # No data, ignore
-                            continue
-                if data_lst:
-                    tbl_item.append({"data": (eval(operation)(data_lst)) /
-                                             1000000})
-                else:
-                    tbl_item.append({"data": None})
-            elif cmd == "operation":
-                operation = args[0]
-                try:
-                    nr1 = float(tbl_item[int(args[1])]["data"])
-                    nr2 = float(tbl_item[int(args[2])]["data"])
-                    if nr1 and nr2:
-                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
-                    else:
-                        tbl_item.append({"data": None})
-                except (IndexError, ValueError, TypeError):
-                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
-                    tbl_item.append({"data": None})
-                    continue
-            else:
-                logging.error("Not supported command {0}. Skipping the table.".
-                              format(cmd))
-                return None
-        tbl_lst.append(tbl_item)
-
-    # Sort the table according to the relative change
-    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
-
-    # Create the tables and write them to the files
-    file_names = [
-        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
-        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
-    ]
-
-    for file_name in file_names:
-        logging.info("    Writing the file '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(",".join(header) + "\n")
-            for item in tbl_lst:
-                if isinstance(item[-1]["data"], float):
-                    rel_change = round(item[-1]["data"], 1)
-                else:
-                    rel_change = item[-1]["data"]
-                if "ndr_top" in file_name \
-                        and "ndr" in item[0]["data"] \
-                        and rel_change >= 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "pdr_top" in file_name \
-                        and "pdr" in item[0]["data"] \
-                        and rel_change >= 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "ndr_low" in file_name \
-                        and "ndr" in item[0]["data"] \
-                        and rel_change < 10.0:
-                    _write_line_to_file(file_handler, item)
-                elif "pdr_low" in file_name \
-                        and "pdr" in item[0]["data"] \
-                        and rel_change < 10.0:
-                    _write_line_to_file(file_handler, item)
-
-    logging.info("  Done.")
-
-
-def _read_csv_template(file_name):
-    """Read the template from a .csv file.
-
-    :param file_name: Name / full path / relative path of the file to read.
-    :type file_name: str
-    :returns: Data from the template as list (lines) of lists (items on line).
-    :rtype: list
-    :raises: PresentationError if it is not possible to read the file.
-    """
-
-    try:
-        with open(file_name, 'r') as csv_file:
-            tmpl_data = list()
-            for line in csv_file:
-                tmpl_data.append(line[:-1].split(","))
-        return tmpl_data
-    except IOError as err:
-        raise PresentationError(str(err), level="ERROR")
-
-
 def table_performance_comparison(table, input_data):
     """Generate the table(s) with algorithm: table_performance_comparison
     specified in the specification file.
@@ -373,18 +209,23 @@ def table_performance_comparison(table, input_data):
     try:
         header = ["Test case", ]
 
+        if table["include-tests"] == "MRR":
+            hdr_param = "Receive Rate"
+        else:
+            hdr_param = "Throughput"
+
         history = table.get("history", None)
         if history:
             for item in history:
                 header.extend(
-                    ["{0} Throughput [Mpps]".format(item["title"]),
+                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                      "{0} Stdev [Mpps]".format(item["title"])])
         header.extend(
-            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
+            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
-             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
-             "Change [%]"])
+             "Delta [%]"])
         header_str = ",".join(header) + "\n"
     except (AttributeError, KeyError) as err:
         logging.error("The model is invalid, missing parameter: {0}".
@@ -396,45 +237,138 @@ def table_performance_comparison(table, input_data):
     for job, builds in table["reference"]["data"].items():
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].iteritems():
-                if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "ref-data": list(),
-                                          "cmp-data": list()}
+                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
+                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
+                    replace("-ndrdisc", "").replace("-pdr", "").\
+                    replace("-ndr", "").\
+                    replace("1t1c", "1c").replace("2t1c", "1c").\
+                    replace("2t2c", "2c").replace("4t2c", "2c").\
+                    replace("4t4c", "4c").replace("8t4c", "4c")
+                if "across topologies" in table["title"].lower():
+                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
+                if tbl_dict.get(tst_name_mod, None) is None:
+                    groups = re.search(REGEX_NIC, tst_data["parent"])
+                    nic = groups.group(0) if groups else ""
+                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
+                                                          split("-")[:-1]))
+                    if "across testbeds" in table["title"].lower() or \
+                            "across topologies" in table["title"].lower():
+                        name = name.\
+                            replace("1t1c", "1c").replace("2t1c", "1c").\
+                            replace("2t2c", "2c").replace("4t2c", "2c").\
+                            replace("4t4c", "4c").replace("8t4c", "4c")
+                    tbl_dict[tst_name_mod] = {"name": name,
+                                              "ref-data": list(),
+                                              "cmp-data": list()}
                 try:
-                    tbl_dict[tst_name]["ref-data"].\
-                        append(tst_data["throughput"]["value"])
+                    # TODO: Re-work when NDRPDRDISC tests are not used
+                    if table["include-tests"] == "MRR":
+                        tbl_dict[tst_name_mod]["ref-data"]. \
+                            append(tst_data["result"]["receive-rate"].avg)
+                    elif table["include-tests"] == "PDR":
+                        if tst_data["type"] == "PDR":
+                            tbl_dict[tst_name_mod]["ref-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["ref-data"].append(
+                                tst_data["throughput"]["PDR"]["LOWER"])
+                    elif table["include-tests"] == "NDR":
+                        if tst_data["type"] == "NDR":
+                            tbl_dict[tst_name_mod]["ref-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["ref-data"].append(
+                                tst_data["throughput"]["NDR"]["LOWER"])
+                    else:
+                        continue
                 except TypeError:
                     pass  # No data in output.xml for this test
 
     for job, builds in table["compare"]["data"].items():
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
+                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
+                    replace("-ndrdisc", "").replace("-pdr", ""). \
+                    replace("-ndr", "").\
+                    replace("1t1c", "1c").replace("2t1c", "1c").\
+                    replace("2t2c", "2c").replace("4t2c", "2c").\
+                    replace("4t4c", "4c").replace("8t4c", "4c")
+                if "across topologies" in table["title"].lower():
+                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                 try:
-                    tbl_dict[tst_name]["cmp-data"].\
-                        append(tst_data["throughput"]["value"])
+                    # TODO: Re-work when NDRPDRDISC tests are not used
+                    if table["include-tests"] == "MRR":
+                        tbl_dict[tst_name_mod]["cmp-data"]. \
+                            append(tst_data["result"]["receive-rate"].avg)
+                    elif table["include-tests"] == "PDR":
+                        if tst_data["type"] == "PDR":
+                            tbl_dict[tst_name_mod]["cmp-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["cmp-data"].append(
+                                tst_data["throughput"]["PDR"]["LOWER"])
+                    elif table["include-tests"] == "NDR":
+                        if tst_data["type"] == "NDR":
+                            tbl_dict[tst_name_mod]["cmp-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["cmp-data"].append(
+                                tst_data["throughput"]["NDR"]["LOWER"])
+                    else:
+                        continue
                 except KeyError:
                     pass
                 except TypeError:
-                    tbl_dict.pop(tst_name, None)
+                    tbl_dict.pop(tst_name_mod, None)
     if history:
         for item in history:
             for job, builds in item["data"].items():
                 for build in builds:
                     for tst_name, tst_data in data[job][str(build)].iteritems():
-                        if tbl_dict.get(tst_name, None) is None:
+                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
+                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
+                            replace("-ndrdisc", "").replace("-pdr", ""). \
+                            replace("-ndr", "").\
+                            replace("1t1c", "1c").replace("2t1c", "1c").\
+                            replace("2t2c", "2c").replace("4t2c", "2c").\
+                            replace("4t4c", "4c").replace("8t4c", "4c")
+                        if "across topologies" in table["title"].lower():
+                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
+                        if tbl_dict.get(tst_name_mod, None) is None:
                             continue
-                        if tbl_dict[tst_name].get("history", None) is None:
-                            tbl_dict[tst_name]["history"] = OrderedDict()
-                        if tbl_dict[tst_name]["history"].get(item["title"],
+                        if tbl_dict[tst_name_mod].get("history", None) is None:
+                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
+                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
                                                              None) is None:
-                            tbl_dict[tst_name]["history"][item["title"]] = \
+                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                 list()
                         try:
-                            tbl_dict[tst_name]["history"][item["title"]].\
-                                append(tst_data["throughput"]["value"])
+                            # TODO: Re-work when NDRPDRDISC tests are not used
+                            if table["include-tests"] == "MRR":
+                                tbl_dict[tst_name_mod]["history"][item["title"
+                                ]].append(tst_data["result"]["receive-rate"].
+                                          avg)
+                            elif table["include-tests"] == "PDR":
+                                if tst_data["type"] == "PDR":
+                                    tbl_dict[tst_name_mod]["history"][
+                                        item["title"]].\
+                                        append(tst_data["throughput"]["value"])
+                                elif tst_data["type"] == "NDRPDR":
+                                    tbl_dict[tst_name_mod]["history"][item[
+                                        "title"]].append(tst_data["throughput"][
+                                        "PDR"]["LOWER"])
+                            elif table["include-tests"] == "NDR":
+                                if tst_data["type"] == "NDR":
+                                    tbl_dict[tst_name_mod]["history"][
+                                        item["title"]].\
+                                        append(tst_data["throughput"]["value"])
+                                elif tst_data["type"] == "NDRPDR":
+                                    tbl_dict[tst_name_mod]["history"][item[
+                                        "title"]].append(tst_data["throughput"][
+                                        "NDR"]["LOWER"])
+                            else:
+                                continue
                         except (TypeError, KeyError):
                             pass
 
@@ -471,103 +405,18 @@ def table_performance_comparison(table, input_data):
     # Sort the table according to the relative change
     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
 
-    # Generate tables:
-    # All tests in csv:
-    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-                 ]
-    for file_name in tbl_names:
-        logging.info("      Writing file: '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(header_str)
-            for test in tbl_lst:
-                if (file_name.split("-")[-3] in test[0] and    # NDR vs PDR
-                        file_name.split("-")[-2] in test[0]):  # cores
-                    test[0] = "-".join(test[0].split("-")[:-1])
-                    file_handler.write(",".join([str(item) for item in test]) +
-                                       "\n")
-
-    # All tests in txt:
-    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
-                     ]
-
-    for i, txt_name in enumerate(tbl_names_txt):
-        logging.info("      Writing file: '{0}'".format(txt_name))
-        convert_csv_to_pretty_txt(tbl_names[i], txt_name)
-
-    # Selected tests in csv:
-    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    with open(input_file, "r") as in_file:
-        lines = list()
-        for line in in_file:
-            lines.append(line)
-
-    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[1:]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
-                                                  table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[-1:0:-1]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    with open(input_file, "r") as in_file:
-        lines = list()
-        for line in in_file:
-            lines.append(line)
-
-    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[1:]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
-                                                  table["output-file-ext"])
-    logging.info("      Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[-1:0:-1]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-
-def table_performance_comparison_mrr(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_comparison_mrr
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
+
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
+
+
+def table_nics_comparison(table, input_data):
+    """Generate the table(s) with algorithm: table_nics_comparison
     specified in the specification file.
 
     :param table: Table to generate.
@@ -586,12 +435,19 @@ def table_performance_comparison_mrr(table, input_data):
 
     # Prepare the header of the tables
     try:
-        header = ["Test case",
-                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
-                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
-                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
-                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
-                  "Change [%]"]
+        header = ["Test case", ]
+
+        if table["include-tests"] == "MRR":
+            hdr_param = "Receive Rate"
+        else:
+            hdr_param = "Throughput"
+
+        header.extend(
+            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
+             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
+             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+             "Delta [%]"])
         header_str = ",".join(header) + "\n"
     except (AttributeError, KeyError) as err:
         logging.error("The model is invalid, missing parameter: {0}".
@@ -600,32 +456,41 @@ def table_performance_comparison_mrr(table, input_data):
 
     # Prepare data to the table:
     tbl_dict = dict()
-    for job, builds in table["reference"]["data"].items():
-        for build in builds:
-            for tst_name, tst_data in data[job][str(build)].iteritems():
-                if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "ref-data": list(),
-                                          "cmp-data": list()}
-                try:
-                    tbl_dict[tst_name]["ref-data"].\
-                        append(tst_data["result"]["receive-rate"].avg)
-                except TypeError:
-                    pass  # No data in output.xml for this test
-
-    for job, builds in table["compare"]["data"].items():
+    for job, builds in table["data"].items():
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
+                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
+                    replace("-ndrdisc", "").replace("-pdr", "").\
+                    replace("-ndr", "").\
+                    replace("1t1c", "1c").replace("2t1c", "1c").\
+                    replace("2t2c", "2c").replace("4t2c", "2c").\
+                    replace("4t4c", "4c").replace("8t4c", "4c")
+                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
+                if tbl_dict.get(tst_name_mod, None) is None:
+                    name = "-".join(tst_data["name"].split("-")[:-1])
+                    tbl_dict[tst_name_mod] = {"name": name,
+                                              "ref-data": list(),
+                                              "cmp-data": list()}
                 try:
-                    tbl_dict[tst_name]["cmp-data"].\
-                        append(tst_data["result"]["receive-rate"].avg)
-                except KeyError:
-                    pass
-                except TypeError:
-                    tbl_dict.pop(tst_name, None)
+                    if table["include-tests"] == "MRR":
+                        result = tst_data["result"]["receive-rate"].avg
+                    elif table["include-tests"] == "PDR":
+                        result = tst_data["throughput"]["PDR"]["LOWER"]
+                    elif table["include-tests"] == "NDR":
+                        result = tst_data["throughput"]["NDR"]["LOWER"]
+                    else:
+                        result = None
+
+                    if result:
+                        if table["reference"]["nic"] in tst_data["tags"]:
+                            tbl_dict[tst_name_mod]["ref-data"].append(result)
+                        elif table["compare"]["nic"] in tst_data["tags"]:
+                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
+                except (TypeError, KeyError) as err:
+                    logging.debug("No data for {0}".format(tst_name))
+                    logging.debug(repr(err))
+                    # No data in output.xml for this test
 
     tbl_lst = list()
     for tst_name in tbl_dict.keys():
@@ -642,42 +507,138 @@ def table_performance_comparison_mrr(table, input_data):
             item.append(round(stdev(data_t) / 1000000, 2))
         else:
             item.extend([None, None])
-        if item[1] is not None and item[3] is not None and item[1] != 0:
-            item.append(int(relative_change(float(item[1]), float(item[3]))))
-        if len(item) == 6:
+        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
+            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
+        if len(item) == len(header):
+            tbl_lst.append(item)
+
+    # Sort the table according to the relative change
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
+
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
+
+
+def table_soak_vs_ndr(table, input_data):
+    """Generate the table(s) with algorithm: table_soak_vs_ndr
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info("  Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    # Prepare the header of the table
+    try:
+        header = [
+            "Test case",
+            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
+            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+            "Delta [%]"]
+        header_str = ",".join(header) + "\n"
+    except (AttributeError, KeyError) as err:
+        logging.error("The model is invalid, missing parameter: {0}".
+                      format(err))
+        return
+
+    # Create a list of available SOAK test results:
+    tbl_dict = dict()
+    for job, builds in table["compare"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                if tst_data["type"] == "SOAK":
+                    tst_name_mod = tst_name.replace("-soak", "")
+                    if tbl_dict.get(tst_name_mod, None) is None:
+                        groups = re.search(REGEX_NIC, tst_data["parent"])
+                        nic = groups.group(0) if groups else ""
+                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
+                                                              split("-")[:-1]))
+                        tbl_dict[tst_name_mod] = {
+                            "name": name,
+                            "ref-data": list(),
+                            "cmp-data": list()
+                        }
+                    try:
+                        tbl_dict[tst_name_mod]["cmp-data"].append(
+                            tst_data["throughput"]["LOWER"])
+                    except (KeyError, TypeError):
+                        pass
+    tests_lst = tbl_dict.keys()
+
+    # Add corresponding NDR test results:
+    for job, builds in table["reference"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdr", "").\
+                    replace("-mrr", "")
+                if tst_name_mod in tests_lst:
+                    try:
+                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
+                            if table["include-tests"] == "MRR":
+                                result = tst_data["result"]["receive-rate"].avg
+                            elif table["include-tests"] == "PDR":
+                                result = tst_data["throughput"]["PDR"]["LOWER"]
+                            elif table["include-tests"] == "NDR":
+                                result = tst_data["throughput"]["NDR"]["LOWER"]
+                            else:
+                                result = None
+                            if result is not None:
+                                tbl_dict[tst_name_mod]["ref-data"].append(
+                                    result)
+                    except (KeyError, TypeError):
+                        continue
+
+    tbl_lst = list()
+    for tst_name in tbl_dict.keys():
+        item = [tbl_dict[tst_name]["name"], ]
+        data_r = tbl_dict[tst_name]["ref-data"]
+        if data_r:
+            data_r_mean = mean(data_r)
+            item.append(round(data_r_mean / 1000000, 2))
+            item.append(round(stdev(data_r) / 1000000, 2))
+        else:
+            data_r_mean = None
+            item.extend([None, None])
+        data_c = tbl_dict[tst_name]["cmp-data"]
+        if data_c:
+            data_c_mean = mean(data_c)
+            item.append(round(data_c_mean / 1000000, 2))
+            item.append(round(stdev(data_c) / 1000000, 2))
+        else:
+            data_c_mean = None
+            item.extend([None, None])
+        if data_r_mean and data_c_mean is not None:
+            item.append(round(relative_change(data_r_mean, data_c_mean), 2))
             tbl_lst.append(item)
 
     # Sort the table according to the relative change
     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
 
-    # Generate tables:
-    # All tests in csv:
-    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"]),
-                 "{0}-2t2c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"]),
-                 "{0}-4t4c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"])
-                 ]
-    for file_name in tbl_names:
-        logging.info("      Writing file: '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(header_str)
-            for test in tbl_lst:
-                if file_name.split("-")[-2] in test[0]:  # cores
-                    test[0] = "-".join(test[0].split("-")[:-1])
-                    file_handler.write(",".join([str(item) for item in test]) +
-                                       "\n")
-
-    # All tests in txt:
-    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-4t4c-full.txt".format(table["output-file"])
-                     ]
-
-    for i, txt_name in enumerate(tbl_names_txt):
-        logging.info("      Writing file: '{0}'".format(txt_name))
-        convert_csv_to_pretty_txt(tbl_names[i], txt_name)
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
+
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
 
 
 def table_performance_trending_dashboard(table, input_data):
@@ -717,13 +678,15 @@ def table_performance_trending_dashboard(table, input_data):
                 if tst_name.lower() in table["ignore-list"]:
                     continue
                 if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "data": OrderedDict()}
+                    groups = re.search(REGEX_NIC, tst_data["parent"])
+                    if not groups:
+                        continue
+                    nic = groups.group(0)
+                    tbl_dict[tst_name] = {
+                        "name": "{0}-{1}".format(nic, tst_data["name"]),
+                        "data": OrderedDict()}
                 try:
-                    tbl_dict[tst_name]["data"][str(build)] =  \
+                    tbl_dict[tst_name]["data"][str(build)] = \
                         tst_data["result"]["receive-rate"]
                 except (TypeError, KeyError):
                     pass  # No data in output.xml for this test
@@ -763,12 +726,15 @@ def table_performance_trending_dashboard(table, input_data):
         if classification_lst:
             if isnan(rel_change_last) and isnan(rel_change_long):
                 continue
+            if (isnan(last_avg) or
+                isnan(rel_change_last) or
+                isnan(rel_change_long)):
+                continue
             tbl_lst.append(
                 [tbl_dict[tst_name]["name"],
-                 '-' if isnan(last_avg) else
                  round(last_avg / 1000000, 2),
-                 '-' if isnan(rel_change_last) else rel_change_last,
-                 '-' if isnan(rel_change_long) else rel_change_long,
+                 rel_change_last,
+                 rel_change_long,
                  classification_lst[-win_size:].count("regression"),
                  classification_lst[-win_size:].count("progression")])
 
@@ -795,12 +761,14 @@ def table_performance_trending_dashboard(table, input_data):
     convert_csv_to_pretty_txt(file_name, txt_file_name)
 
 
-def _generate_url(base, test_name):
+def _generate_url(base, testbed, test_name):
     """Generate URL to a trending plot from the name of the test case.
 
     :param base: The base part of URL common to all test cases.
+    :param testbed: The testbed used for testing.
     :param test_name: The name of the test case.
     :type base: str
+    :type testbed: str
     :type test_name: str
     :returns: The URL to the plot with the trending data for the given test
         case.
@@ -809,65 +777,109 @@ def _generate_url(base, test_name):
 
     url = base
     file_name = ""
-    anchor = "#"
+    anchor = ".html#"
     feature = ""
 
     if "lbdpdk" in test_name or "lbvpp" in test_name:
-        file_name = "link_bonding.html"
+        file_name = "link_bonding"
+
+    elif "114b" in test_name and "vhost" in test_name:
+        file_name = "vts"
 
     elif "testpmd" in test_name or "l3fwd" in test_name:
-        file_name = "dpdk.html"
+        file_name = "dpdk"
 
     elif "memif" in test_name:
-        file_name = "container_memif.html"
+        file_name = "container_memif"
+        feature = "-base"
 
     elif "srv6" in test_name:
-        file_name = "srv6.html"
+        file_name = "srv6"
 
     elif "vhost" in test_name:
         if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
-            file_name = "vm_vhost_l2.html"
+            file_name = "vm_vhost_l2"
+            if "114b" in test_name:
+                feature = ""
+            elif "l2xcbase" in test_name and "x520" in test_name:
+                feature = "-base-l2xc"
+            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
+                feature = "-base-l2bd"
+            else:
+                feature = "-base"
         elif "ip4base" in test_name:
-            file_name = "vm_vhost_ip4.html"
+            file_name = "vm_vhost_ip4"
+            feature = "-base"
 
     elif "ipsec" in test_name:
-        file_name = "ipsec.html"
+        file_name = "ipsec"
+        feature = "-base-scale"
 
     elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
-        file_name = "ip4_tunnels.html"
+        file_name = "ip4_tunnels"
+        feature = "-base"
 
     elif "ip4base" in test_name or "ip4scale" in test_name:
-        file_name = "ip4.html"
-        if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
+        file_name = "ip4"
+        if "xl710" in test_name:
+            feature = "-base-scale-features"
+        elif "iacl" in test_name:
+            feature = "-features-iacl"
+        elif "oacl" in test_name:
+            feature = "-features-oacl"
+        elif "snat" in test_name or "cop" in test_name:
             feature = "-features"
+        else:
+            feature = "-base-scale"
 
     elif "ip6base" in test_name or "ip6scale" in test_name:
-        file_name = "ip6.html"
+        file_name = "ip6"
+        feature = "-base-scale"
 
     elif "l2xcbase" in test_name or "l2xcscale" in test_name \
             or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
             or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
-        file_name = "l2.html"
-        if "iacl" in test_name:
-            feature = "-features"
+        file_name = "l2"
+        if "macip" in test_name:
+            feature = "-features-macip"
+        elif "iacl" in test_name:
+            feature = "-features-iacl"
+        elif "oacl" in test_name:
+            feature = "-features-oacl"
+        else:
+            feature = "-base-scale"
 
     if "x520" in test_name:
-        anchor += "x520-"
+        nic = "x520-"
     elif "x710" in test_name:
-        anchor += "x710-"
+        nic = "x710-"
     elif "xl710" in test_name:
-        anchor += "xl710-"
+        nic = "xl710-"
+    elif "xxv710" in test_name:
+        nic = "xxv710-"
+    elif "vic1227" in test_name:
+        nic = "vic1227-"
+    elif "vic1385" in test_name:
+        nic = "vic1385-"
+    else:
+        nic = ""
+    anchor += nic
 
     if "64b" in test_name:
-        anchor += "64b-"
+        framesize = "64b"
     elif "78b" in test_name:
-        anchor += "78b-"
+        framesize = "78b"
     elif "imix" in test_name:
-        anchor += "imix-"
+        framesize = "imix"
     elif "9000b" in test_name:
-        anchor += "9000b-"
-    elif "1518" in test_name:
-        anchor += "1518b-"
+        framesize = "9000b"
+    elif "1518b" in test_name:
+        framesize = "1518b"
+    elif "114b" in test_name:
+        framesize = "114b"
+    else:
+        framesize = ""
+    anchor += framesize + '-'
 
     if "1t1c" in test_name:
         anchor += "1t1c"
@@ -875,8 +887,15 @@ def _generate_url(base, test_name):
         anchor += "2t2c"
     elif "4t4c" in test_name:
         anchor += "4t4c"
+    elif "2t1c" in test_name:
+        anchor += "2t1c"
+    elif "4t2c" in test_name:
+        anchor += "4t2c"
+    elif "8t4c" in test_name:
+        anchor += "8t4c"
 
-    return url + file_name + anchor + feature
+    return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \
+           anchor + feature
 
 
 def table_performance_trending_dashboard_html(table, input_data):
@@ -886,10 +905,16 @@ def table_performance_trending_dashboard_html(table, input_data):
 
     :param table: Table to generate.
     :param input_data: Data to process.
-    :type table: pandas.Series
+    :type table: dict
     :type input_data: InputData
     """
 
+    testbed = table.get("testbed", None)
+    if testbed is None:
+        logging.error("The testbed is not defined for the table '{0}'.".
+                      format(table.get("title", "")))
+        return
+
     logging.info("  Generating the table {0} ...".
                  format(table.get("title", "")))
 
@@ -935,7 +960,7 @@ def table_performance_trending_dashboard_html(table, input_data):
             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
             # Name:
             if c_idx == 0:
-                url = _generate_url("../trending/", item)
+                url = _generate_url("../trending/", testbed, item)
                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
                 ref.text = item
             else:
@@ -951,6 +976,57 @@ def table_performance_trending_dashboard_html(table, input_data):
         return
 
 
+def table_last_failed_tests(table, input_data):
+    """Generate the table(s) with algorithm: table_last_failed_tests
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info("  Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    if data is None or data.empty:
+        logging.warn("    No data for the {0} '{1}'.".
+                     format(table.get("type", ""), table.get("title", "")))
+        return
+
+    tbl_list = list()
+    for job, builds in table["data"].items():
+        for build in builds:
+            build = str(build)
+            try:
+                version = input_data.metadata(job, build).get("version", "")
+            except KeyError:
+                logging.error("Data for {job}: {build} is not present.".
+                              format(job=job, build=build))
+                return
+            tbl_list.append(build)
+            tbl_list.append(version)
+            for tst_name, tst_data in data[job][build].iteritems():
+                if tst_data["status"] != "FAIL":
+                    continue
+                groups = re.search(REGEX_NIC, tst_data["parent"])
+                if not groups:
+                    continue
+                nic = groups.group(0)
+                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
+
+    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
+    logging.info("    Writing file: '{0}'".format(file_name))
+    with open(file_name, "w") as file_handler:
+        for test in tbl_list:
+            file_handler.write(test + '\n')
+
+
 def table_failed_tests(table, input_data):
     """Generate the table(s) with algorithm: table_failed_tests
     specified in the specification file.
@@ -978,6 +1054,10 @@ def table_failed_tests(table, input_data):
 
     # Generate the data for the table according to the model in the table
     # specification
+
+    now = dt.utcnow()
+    timeperiod = timedelta(int(table.get("window", 7)))
+
     tbl_dict = dict()
     for job, builds in table["data"].items():
         for build in builds:
@@ -986,31 +1066,41 @@ def table_failed_tests(table, input_data):
                 if tst_name.lower() in table["ignore-list"]:
                     continue
                 if tbl_dict.get(tst_name, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "data": OrderedDict()}
+                    groups = re.search(REGEX_NIC, tst_data["parent"])
+                    if not groups:
+                        continue
+                    nic = groups.group(0)
+                    tbl_dict[tst_name] = {
+                        "name": "{0}-{1}".format(nic, tst_data["name"]),
+                        "data": OrderedDict()}
                 try:
-                    tbl_dict[tst_name]["data"][build] = (
-                        tst_data["status"],
-                        input_data.metadata(job, build).get("generated", ""),
-                        input_data.metadata(job, build).get("version", ""),
-                        build)
-                except (TypeError, KeyError):
-                    pass  # No data in output.xml for this test
-
+                    generated = input_data.metadata(job, build).\
+                        get("generated", "")
+                    if not generated:
+                        continue
+                    then = dt.strptime(generated, "%Y%m%d %H:%M")
+                    if (now - then) <= timeperiod:
+                        tbl_dict[tst_name]["data"][build] = (
+                            tst_data["status"],
+                            generated,
+                            input_data.metadata(job, build).get("version", ""),
+                            build)
+                except (TypeError, KeyError) as err:
+                    logging.warning("tst_name: {} - err: {}".
+                                    format(tst_name, repr(err)))
+
+    max_fails = 0
     tbl_lst = list()
     for tst_data in tbl_dict.values():
-        win_size = min(len(tst_data["data"]), table["window"])
         fails_nr = 0
-        for val in tst_data["data"].values()[-win_size:]:
+        for val in tst_data["data"].values():
             if val[0] == "FAIL":
                 fails_nr += 1
                 fails_last_date = val[1]
                 fails_last_vpp = val[2]
                 fails_last_csit = val[3]
         if fails_nr:
+            max_fails = fails_nr if fails_nr > max_fails else max_fails
             tbl_lst.append([tst_data["name"],
                             fails_nr,
                             fails_last_date,
@@ -1019,7 +1109,7 @@ def table_failed_tests(table, input_data):
 
     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
     tbl_sorted = list()
-    for nrf in range(table["window"], -1, -1):
+    for nrf in range(max_fails, -1, -1):
         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
         tbl_sorted.extend(tbl_fails)
     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
@@ -1045,6 +1135,12 @@ def table_failed_tests_html(table, input_data):
     :type input_data: InputData
     """
 
+    testbed = table.get("testbed", None)
+    if testbed is None:
+        logging.error("The testbed is not defined for the table '{0}'.".
+                      format(table.get("title", "")))
+        return
+
     logging.info("  Generating the table {0} ...".
                  format(table.get("title", "")))
 
@@ -1082,7 +1178,7 @@ def table_failed_tests_html(table, input_data):
             td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
             # Name:
             if c_idx == 0:
-                url = _generate_url("../trending/", item)
+                url = _generate_url("../trending/", testbed, item)
                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
                 ref.text = item
             else: