Python3: resources and libraries
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index f2656d2..46c8b9d 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -19,6 +19,7 @@
 - filter the data using tags,
 """
 
+import copy
 import re
 import resource
 import pandas as pd
@@ -30,7 +31,6 @@ from robot import errors
 from collections import OrderedDict
 from string import replace
 from os import remove
-from os.path import join
 from datetime import datetime as dt
 from datetime import timedelta
 from json import loads
@@ -98,24 +98,28 @@ class ExecutionChecker(ResultVisitor):
                         "direction1": {
                             "min": float,
                             "avg": float,
                         "direction1": {
                             "min": float,
                             "avg": float,
-                            "max": float
+                            "max": float,
+                            "hdrh": str
                         },
                         "direction2": {
                             "min": float,
                             "avg": float,
-                            "max": float
+                            "max": float,
+                            "hdrh": str
                         }
                     },
                     "PDR": {
                         "direction1": {
                             "min": float,
                             "avg": float,
-                            "max": float
+                            "max": float,
+                            "hdrh": str
                         },
                         "direction2": {
                             "min": float,
                             "avg": float,
-                            "max": float
+                            "max": float,
+                            "hdrh": str
                         }
                     }
                 }
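
Each latency direction now carries an extra "hdrh" field next to min/avg/max. A minimal sketch of one per-direction record after this change (values are illustrative; the hdrh string is assumed to be the serialised HdrHistogram of the measured latencies):

    # Illustrative record matching the schema above; a real "hdrh" value
    # is an encoded HdrHistogram string, truncated here.
    latency_direction = {
        "min": 1.0,
        "avg": 2.0,
        "max": 10.0,
        "hdrh": "HISTFAAAA...",
    }
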
@@ -147,60 +151,6 @@ class ExecutionChecker(ResultVisitor):
                 }
             }
 
-            # TODO: Remove when definitely no NDRPDRDISC tests are used:
-            # NDRPDRDISC tests:
-            "ID": {
-                "name": "Test name",
-                "parent": "Name of the parent of the test",
-                "doc": "Test documentation",
-                "msg": "Test message",
-                "tags": ["tag 1", "tag 2", "tag n"],
-                "type": "PDR" | "NDR",
-                "status": "PASS" | "FAIL",
-                "throughput": {  # Only type: "PDR" | "NDR"
-                    "value": int,
-                    "unit": "pps" | "bps" | "percentage"
-                },
-                "latency": {  # Only type: "PDR" | "NDR"
-                    "direction1": {
-                        "100": {
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        },
-                        "50": {  # Only for NDR
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        },
-                        "10": {  # Only for NDR
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        }
-                    },
-                    "direction2": {
-                        "100": {
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        },
-                        "50": {  # Only for NDR
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        },
-                        "10": {  # Only for NDR
-                            "min": int,
-                            "avg": int,
-                            "max": int
-                        }
-                    }
-                },
-                "lossTolerance": "lossTolerance",  # Only type: "PDR"
-                "conf-history": "DUT1 and DUT2 VAT History"
-                "show-run": "Show Run"
-            },
             "ID" {
                 # next test
             }
             "ID" {
                 # next test
             }
@@ -251,27 +201,14 @@ class ExecutionChecker(ResultVisitor):
     # TODO: Remove when definitely no NDRPDRDISC tests are used:
     REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
 
-    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
-                                r'PLRsearch upper bound::\s(\d+.\d+)')
+    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
+                                r'PLRsearch upper bound::?\s(\d+.\d+)')
 
     REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                    r'NDR_UPPER:\s(\d+.\d+).*\n'
                                    r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                    r'PDR_UPPER:\s(\d+.\d+)')
 
-    # TODO: Remove when definitely no NDRPDRDISC tests are used:
-    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
-                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
-                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
-                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
-                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
-                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
-                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
-
-    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
-                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
-                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
-
     REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                   r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
 
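
The added '?' makes the second colon optional, so REGEX_PLR_RATE now accepts both the older 'bound::' and the newer 'bound:' message forms. A quick standalone check with illustrative messages:

    import re

    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::?\s(\d+.\d+)')

    # Both message variants (illustrative) now match:
    for msg in ("PLRsearch lower bound:: 1.5 pps\nPLRsearch upper bound:: 2.5 pps",
                "PLRsearch lower bound: 1.5 pps\nPLRsearch upper bound: 2.5 pps"):
        match = re.search(REGEX_PLR_RATE, msg)
        print(match.group(1), match.group(2))  # 1.5 2.5 for both variants
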
@@ -281,9 +218,9 @@ class ExecutionChecker(ResultVisitor):
     REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                    r"VPP Version:\s*|VPP version:\s*)(.*)")
 
-    REGEX_VERSION_DPDK = re.compile(r"DPDK Version: (\d*.\d*)")
+    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
 
-    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
+    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')
 
     REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                            r'tx\s(\d*),\srx\s(\d*)')
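
REGEX_VERSION_DPDK now tolerates both capitalisations and captures the version text in group(2), which is why the visitor code further down switches from group(1) to group(2). An illustrative parse:

    import re

    REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")

    # Sample version line (illustrative); group(1) is the label, group(2)
    # the version string itself.
    match = re.search(REGEX_VERSION_DPDK, "DPDK Version: 19.02.0")
    print(match.group(2))  # '19.02.0'
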
@@ -291,13 +228,18 @@ class ExecutionChecker(ResultVisitor):
     REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                             r' in packets per second: \[(.*)\]')
 
+    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
+    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')
+
     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
 
     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
 
     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
 
-    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
+    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
+
+    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
 
     def __init__(self, metadata, mapping, ignore):
         """Initialisation.
@@ -425,7 +367,7 @@ class ExecutionChecker(ResultVisitor):
         if msg.message.count("DPDK Version:"):
             try:
                 self._version = str(re.search(
-                    self.REGEX_VERSION_DPDK, msg.message). group(1))
+                    self.REGEX_VERSION_DPDK, msg.message). group(2))
                 self._data["metadata"]["version"] = self._version
             except IndexError:
                 pass
@@ -457,7 +399,7 @@ class ExecutionChecker(ResultVisitor):
                 self._data["tests"][self._test_ID]["conf-history"] = str()
             else:
                 self._msg_type = None
-            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
+            text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                           "VAT command history:", "", msg.message, count=1). \
                 replace("\n\n", "\n").replace('\n', ' |br| ').\
                 replace('\r', '').replace('"', "'")
                           "VAT command history:", "", msg.message, count=1). \
                 replace("\n\n", "\n").replace('\n', ' |br| ').\
                 replace('\r', '').replace('"', "'")
@@ -479,7 +421,7 @@ class ExecutionChecker(ResultVisitor):
                 self._data["tests"][self._test_ID]["conf-history"] = str()
             else:
                 self._msg_type = None
-            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
+            text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                           "PAPI command history:", "", msg.message, count=1). \
                 replace("\n\n", "\n").replace('\n', ' |br| ').\
                 replace('\r', '').replace('"', "'")
                           "PAPI command history:", "", msg.message, count=1). \
                 replace("\n\n", "\n").replace('\n', ' |br| ').\
                 replace('\r', '').replace('"', "'")
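
Both history handlers strip a leading "a.b.c.d VAT/PAPI command history:" header before reflowing the text with ' |br| ' markers. A condensed, standalone sketch (raw strings are used here to keep the '\d' escapes explicit; the message is illustrative):

    import re

    msg = "10.30.51.49 PAPI command history: show_version\nshow_threads"
    # Drop the "<ip> PAPI command history:" header, then rewrite newlines
    # as ' |br| ' markers, as the handler above does.
    text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                  r"PAPI command history:", "", msg, count=1).\
        replace("\n\n", "\n").replace('\n', ' |br| ')
    print(text)  # ' show_version |br| show_threads'
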
@@ -496,105 +438,77 @@ class ExecutionChecker(ResultVisitor):
         :type msg: Message
         :returns: Nothing.
         """
-        if msg.message.count("Runtime:"):
-            self._show_run_lookup_nr += 1
-            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
-                self._data["tests"][self._test_ID]["show-run"] = str()
-            if self._lookup_kw_nr > 1:
-                self._msg_type = None
-            if self._show_run_lookup_nr == 1:
-                message = str(msg.message).replace(' ', '').replace('\n', '').\
-                    replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
-                runtime = loads(message)
-                try:
-                    threads_nr = len(runtime[0]["clocks"])
-                except (IndexError, KeyError):
-                    return
-                tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
-                table = [[tbl_hdr, ] for _ in range(threads_nr)]
-                for item in runtime:
-                    for idx in range(threads_nr):
+        if not "show-run" in self._data["tests"][self._test_ID].keys():
+            self._data["tests"][self._test_ID]["show-run"] = str()
+
+        if msg.message.count("stats runtime"):
+            host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).\
+                       group(1))
+            socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).\
+                         group(2))
+            message = str(msg.message).replace(' ', '').replace('\n', '').\
+                replace("'", '"').replace('b"', '"').replace('u"', '"').\
+                split(":",1)[1]
+            runtime = loads(message)
+            try:
+                threads_nr = len(runtime[0]["clocks"])
+            except (IndexError, KeyError):
+                return
+            tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks",
+                       "Vectors/Calls"]
+            table = [[tbl_hdr, ] for _ in range(threads_nr)]
+            for item in runtime:
+                for idx in range(threads_nr):
+                    name = format(item["name"])
+                    calls = format(item["calls"][idx])
+                    vectors = format(item["vectors"][idx])
+                    suspends = format(item["suspends"][idx])
+                    if item["vectors"][idx] > 0:
+                        clocks = format(
+                            item["clocks"][idx]/item["vectors"][idx], ".2e")
+                    elif item["calls"][idx] > 0:
+                        clocks = format(
+                            item["clocks"][idx]/item["calls"][idx], ".2e")
+                    elif item["suspends"][idx] > 0:
+                        clocks = format(
+                            item["clocks"][idx]/item["suspends"][idx], ".2e")
+                    else:
+                        clocks = 0
+                    if item["calls"][idx] > 0:
+                        vectors_call = format(
+                            item["vectors"][idx]/item["calls"][idx], ".2f")
+                    else:
+                        vectors_call = format(0, ".2f")
+                    if int(calls) + int(vectors) + int(suspends):
                         table[idx].append([
-                            item["name"],
-                            item["calls"][idx],
-                            item["vectors"][idx],
-                            item["suspends"][idx],
-                            item["clocks"][idx]
+                            name, calls, vectors, suspends, clocks, vectors_call
                         ])
-                text = ""
-                for idx in range(threads_nr):
-                    text += "Thread {idx} ".format(idx=idx)
-                    text += "vpp_main\n" if idx == 0 else \
-                        "vpp_wk_{idx}\n".format(idx=idx-1)
-                    txt_table = None
-                    for row in table[idx]:
-                        if txt_table is None:
-                            txt_table = prettytable.PrettyTable(row)
-                        else:
-                            if any(row[1:]):
-                                txt_table.add_row(row)
-                    txt_table.align["Name"] = "l"
-                    txt_table.align["Calls"] = "r"
-                    txt_table.align["Vectors"] = "r"
-                    txt_table.align["Suspends"] = "r"
-                    txt_table.align["Clocks"] = "r"
-
-                    text += txt_table.get_html_string(sortby="Name") + '\n'
-
-                text = text.replace('\n', '').replace('\r', '')
-                try:
-                    self._data["tests"][self._test_ID]["show-run"] += " |br| "
-                    self._data["tests"][self._test_ID]["show-run"] += \
-                        "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
-                except KeyError:
-                    pass
-
-    # TODO: Remove when definitely no NDRPDRDISC tests are used:
-    def _get_latency(self, msg, test_type):
-        """Get the latency data from the test message.
-
-        :param msg: Message to be parsed.
-        :param test_type: Type of the test - NDR or PDR.
-        :type msg: str
-        :type test_type: str
-        :returns: Latencies parsed from the message.
-        :rtype: dict
-        """
-
-        if test_type == "NDR":
-            groups = re.search(self.REGEX_LAT_NDR, msg)
-            groups_range = range(1, 7)
-        elif test_type == "PDR":
-            groups = re.search(self.REGEX_LAT_PDR, msg)
-            groups_range = range(1, 3)
-        else:
-            return {}
-
-        latencies = list()
-        for idx in groups_range:
-            try:
-                lat = [int(item) for item in str(groups.group(idx)).split('/')]
-            except (AttributeError, ValueError):
-                lat = [-1, -1, -1]
-            latencies.append(lat)
-
-        keys = ("min", "avg", "max")
-        latency = {
-            "direction1": {
-            },
-            "direction2": {
-            }
-        }
-
-        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
-        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
-        if test_type == "NDR":
-            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
-            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
-            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
-            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
-
-        return latency
+            text = ""
+            for idx in range(threads_nr):
+                text += "Thread {idx} ".format(idx=idx)
+                text += "vpp_main\n" if idx == 0 else \
+                    "vpp_wk_{idx}\n".format(idx=idx-1)
+                txt_table = None
+                for row in table[idx]:
+                    if txt_table is None:
+                        txt_table = prettytable.PrettyTable(row)
+                    else:
+                        if any(row[1:]):
+                            txt_table.add_row(row)
+                txt_table.set_style(prettytable.MSWORD_FRIENDLY)
+                txt_table.align["Name"] = "l"
+                txt_table.align["Calls"] = "r"
+                txt_table.align["Vectors"] = "r"
+                txt_table.align["Suspends"] = "r"
+                txt_table.align["Clocks"] = "r"
+                txt_table.align["Vectors/Calls"] = "r"
+
+                text += txt_table.get_string(sortby="Name") + '\n'
+            text = (" \n **DUT: {host}/{socket}** \n {text}".
+                    format(host=host, socket=socket, text=text))
+            text = text.replace('\n', ' |br| ').replace('\r', '').\
+                replace('"', "'")
+            self._data["tests"][self._test_ID]["show-run"] += text
 
     def _get_ndrpdr_throughput(self, msg):
         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
@@ -660,31 +574,52 @@ class ExecutionChecker(ResultVisitor):
         :returns: Parsed data as a dict and the status (PASS/FAIL).
         :rtype: tuple(dict, str)
         """
-
+        latency_default = {"min": -1.0, "avg": -1.0, "max": -1.0, "hdrh": ""}
         latency = {
             "NDR": {
-                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
-                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
+                "direction1": copy.copy(latency_default),
+                "direction2": copy.copy(latency_default)
             },
             "PDR": {
-                "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
-                "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
+                "direction1": copy.copy(latency_default),
+                "direction2": copy.copy(latency_default)
             }
         }
         status = "FAIL"
         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
 
+        def process_latency(in_str):
+            """Return object with parsed latency values.
+
+            TODO: Define class for the return type.
+
+            :param in_str: Input string, min/avg/max/hdrh format.
+            :type in_str: str
+            :returns: Dict with corresponding keys, except hdrh float values.
+            :rtype dict:
+            :throws IndexError: If in_str does not have enough substrings.
+            :throws ValueError: If a substring does not convert to float.
+            """
+            in_list = in_str.split('/')
+
+            rval = {
+                "min": float(in_list[0]),
+                "avg": float(in_list[1]),
+                "max": float(in_list[2]),
+                "hdrh": ""
+            }
+
+            if len(in_list) == 4:
+                rval["hdrh"] = str(in_list[3])
+
+            return rval
+
         if groups is not None:
-            keys = ("min", "avg", "max")
             try:
-                latency["NDR"]["direction1"] = dict(
-                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
-                latency["NDR"]["direction2"] = dict(
-                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
-                latency["PDR"]["direction1"] = dict(
-                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
-                latency["PDR"]["direction2"] = dict(
-                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
+                latency["NDR"]["direction1"] = process_latency(groups.group(1))
+                latency["NDR"]["direction2"] = process_latency(groups.group(2))
+                latency["PDR"]["direction1"] = process_latency(groups.group(3))
+                latency["PDR"]["direction2"] = process_latency(groups.group(4))
                 status = "PASS"
             except (IndexError, ValueError):
                 pass
                 status = "PASS"
             except (IndexError, ValueError):
                 pass
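
process_latency accepts either the old three-field "min/avg/max" string or the new four-field form whose last element is the encoded HdrHistogram. A standalone copy of the helper with illustrative inputs:

    def process_latency(in_str):
        # Split "min/avg/max[/hdrh]"; the optional fourth field is the
        # encoded HdrHistogram, left empty for three-field input.
        in_list = in_str.split('/')
        rval = {
            "min": float(in_list[0]),
            "avg": float(in_list[1]),
            "max": float(in_list[2]),
            "hdrh": ""
        }
        if len(in_list) == 4:
            rval["hdrh"] = str(in_list[3])
        return rval

    print(process_latency("10/15/20/HISTFAAAA..."))
    # {'min': 10.0, 'avg': 15.0, 'max': 20.0, 'hdrh': 'HISTFAAAA...'}
    print(process_latency("10/15/20"))
    # {'min': 10.0, 'avg': 15.0, 'max': 20.0, 'hdrh': ''}
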
@@ -799,6 +734,7 @@ class ExecutionChecker(ResultVisitor):
             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
             if not groups:
                 tag_count = 0
+                tag_tc = str()
                 for tag in test_result["tags"]:
                     groups = re.search(self.REGEX_TC_TAG, tag)
                     if groups:
@@ -827,7 +763,8 @@ class ExecutionChecker(ResultVisitor):
                                       "SOAK" in tags or
                                       "TCP" in tags or
                                       "MRR" in tags or
                                       "SOAK" in tags or
                                       "TCP" in tags or
                                       "MRR" in tags or
-                                      "BMRR" in tags):
+                                      "BMRR" in tags or
+                                      "RECONF" in tags):
             # TODO: Remove when definitely no NDRPDRDISC tests are used:
             if "NDRDISC" in tags:
                 test_result["type"] = "NDR"
@@ -844,6 +781,8 @@ class ExecutionChecker(ResultVisitor):
                 test_result["type"] = "MRR"
             elif "FRMOBL" in tags or "BMRR" in tags:
                 test_result["type"] = "BMRR"
+            elif "RECONF" in tags:
+                test_result["type"] = "RECONF"
             else:
                 test_result["status"] = "FAIL"
                 self._data["tests"][self._test_ID] = test_result
@@ -905,6 +844,18 @@ class ExecutionChecker(ResultVisitor):
                         AvgStdevMetadataFactory.from_data([
                             float(groups.group(3)) / float(groups.group(1)), ])
 
+            elif test_result["type"] == "RECONF":
+                test_result["result"] = None
+                try:
+                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
+                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
+                    test_result["result"] = {
+                        "loss": int(grps_loss.group(1)),
+                        "time": float(grps_time.group(1))
+                    }
+                except (AttributeError, IndexError, ValueError, TypeError):
+                    test_result["status"] = "FAIL"
+
         self._data["tests"][self._test_ID] = test_result
 
     def end_test(self, test):
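
The new RECONF branch reduces the test message to a small result dict via the two regexes added above. An illustrative message and the values it yields:

    import re

    REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
    REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')

    # Sample test message (illustrative numbers):
    message = "Packets lost due to reconfig: 84915\nImplied time lost: 0.0105"
    result = {
        "loss": int(re.search(REGEX_RECONF_LOSS, message).group(1)),   # 84915
        "time": float(re.search(REGEX_RECONF_TIME, message).group(1)), # 0.0105
    }
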
@@ -1108,7 +1059,7 @@ class ExecutionChecker(ResultVisitor):
         pass
 
 
-class InputData(object):
+class InputData:
     """Input data
 
     The data is extracted from output.xml files generated by Jenkins jobs and
     """Input data
 
     The data is extracted from output.xml files generated by Jenkins jobs and
@@ -1290,23 +1241,6 @@ class InputData(object):
                             ("INFO",
                              "    The build {job}/{build} is outdated, will be "
                              "removed".format(job=job, build=build["build"])))
                             ("INFO",
                              "    The build {job}/{build} is outdated, will be "
                              "removed".format(job=job, build=build["build"])))
-                        file_name = self._cfg.input["file-name"]
-                        full_name = join(
-                            self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
-                            "{job}{sep}{build}{sep}{name}".format(
-                                job=job,
-                                sep=SEPARATOR,
-                                build=build["build"],
-                                name=file_name))
-                        try:
-                            remove(full_name)
-                            logs.append(("INFO",
-                                         "    The file {name} has been removed".
-                                         format(name=full_name)))
-                        except OSError as err:
-                            logs.append(("ERROR",
-                                         "Cannot remove the file '{0}': {1}".
-                                         format(full_name, repr(err))))
         logs.append(("INFO", "  Done."))
 
         for level, line in logs:
@@ -1403,7 +1337,7 @@ class InputData(object):
             index += 1
             tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
 
-    def filter_data(self, element, params=None, data_set="tests",
+    def filter_data(self, element, params=None, data=None, data_set="tests",
                     continue_on_error=False):
         """Filter required data from the given jobs and builds.
 
@@ -1426,13 +1360,16 @@ class InputData(object):
 
         :param element: Element which will use the filtered data.
         :param params: Parameters which will be included in the output. If None,
-        all parameters are included.
+            all parameters are included.
+        :param data: If not None, this data is used instead of data specified
+            in the element.
         :param data_set: The set of data to be filtered: tests, suites,
-        metadata.
+            metadata.
         :param continue_on_error: Continue if there is error while reading the
-        data. The Item will be empty then
+            data. The Item will be empty then
         :type element: pandas.Series
         :type params: list
+        :type data: dict
         :type data_set: str
         :type continue_on_error: bool
         :returns: Filtered data.
@@ -1454,9 +1391,10 @@ class InputData(object):
             if params:
                 params.append("type")
 
+        data_to_filter = data if data else element["data"]
         data = pd.Series()
         try:
-            for job, builds in element["data"].items():
+            for job, builds in data_to_filter.items():
                 data[job] = pd.Series()
                 for build in builds:
                     data[job][str(build)] = pd.Series()
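
With the new data argument a caller can filter a job/build mapping other than the one stored in the element specification; when data is None the behaviour is unchanged. A hypothetical call sketch (the instance, job name and build numbers are placeholders):

    # Hypothetical usage: input_data is an InputData instance, element a
    # specification series; the job -> builds mapping overrides
    # element["data"].
    filtered = input_data.filter_data(
        element,
        params=["throughput", "latency"],
        data={"csit-vpp-perf-verify-master-3n-hsw": [1, 2, 3]},
        data_set="tests")
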
@@ -1495,6 +1433,96 @@ class InputData(object):
                           "tags are enclosed by apostrophes.".format(cond))
             return None
 
                           "tags are enclosed by apostrophes.".format(cond))
             return None
 
+    def filter_tests_by_name(self, element, params=None, data_set="tests",
+                             continue_on_error=False):
+        """Filter required data from the given jobs and builds.
+
+        The output data structure is:
+
+        - job 1
+          - build 1
+            - test (or suite) 1 ID:
+              - param 1
+              - param 2
+              ...
+              - param n
+            ...
+            - test (or suite) n ID:
+            ...
+          ...
+          - build n
+        ...
+        - job n
+
+        :param element: Element which will use the filtered data.
+        :param params: Parameters which will be included in the output. If None,
+        all parameters are included.
+        :param data_set: The set of data to be filtered: tests, suites,
+        metadata.
+        :param continue_on_error: Continue if there is error while reading the
+        data. The Item will be empty then
+        :type element: pandas.Series
+        :type params: list
+        :type data_set: str
+        :type continue_on_error: bool
+        :returns: Filtered data.
+        :rtype pandas.Series
+        """
+
+        include = element.get("include", None)
+        if not include:
+            logging.warning("No tests to include, skipping the element.")
+            return None
+
+        if params is None:
+            params = element.get("parameters", None)
+            if params:
+                params.append("type")
+
+        data = pd.Series()
+        try:
+            for job, builds in element["data"].items():
+                data[job] = pd.Series()
+                for build in builds:
+                    data[job][str(build)] = pd.Series()
+                    for test in include:
+                        try:
+                            reg_ex = re.compile(str(test).lower())
+                            for test_ID in self.data[job][str(build)]\
+                                    [data_set].keys():
+                                if re.match(reg_ex, str(test_ID).lower()):
+                                    test_data = self.data[job][str(build)]\
+                                        [data_set][test_ID]
+                                    data[job][str(build)][test_ID] = pd.Series()
+                                    if params is None:
+                                        for param, val in test_data.items():
+                                            data[job][str(build)][test_ID]\
+                                                [param] = val
+                                    else:
+                                        for param in params:
+                                            try:
+                                                data[job][str(build)][test_ID]\
+                                                    [param] = test_data[param]
+                                            except KeyError:
+                                                data[job][str(build)][test_ID]\
+                                                    [param] = "No Data"
+                        except KeyError as err:
+                            logging.error("{err!r}".format(err=err))
+                            if continue_on_error:
+                                continue
+                            else:
+                                return None
+            return data
+
+        except (KeyError, IndexError, ValueError) as err:
+            logging.error("Missing mandatory parameter in the element "
+                          "specification: {err!r}".format(err=err))
+            return None
+        except AttributeError as err:
+            logging.error("{err!r}".format(err=err))
+            return None
+
+
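
The matching core of filter_tests_by_name: each entry of the element's include list is compiled as a lowercased regexp and anchored-matched against the lowercased test IDs. A minimal standalone sketch (IDs and pattern are illustrative):

    import re

    include = [r".*-64b-1c-ethip4-ip4base-ndrpdr"]
    test_ids = [
        "tests.vpp.perf.ip4.10ge2p1x710-64b-1c-ethip4-ip4base-ndrpdr",
        "tests.vpp.perf.ip4.10ge2p1x710-64b-2c-ethip4-ip4base-ndrpdr",
    ]
    for test in include:
        reg_ex = re.compile(str(test).lower())
        selected = [tid for tid in test_ids
                    if re.match(reg_ex, str(tid).lower())]
    print(selected)  # only the ...-1c-... test ID matches
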
     @staticmethod
     def merge_data(data):
         """Merge data from more jobs and builds to a simple data structure.