Report: Add gso tests
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index a619bd2..6742439 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -25,10 +25,12 @@ import resource
 import logging
 
 from collections import OrderedDict
-from os import remove
+from os import remove, walk, listdir
+from os.path import isfile, isdir, join
 from datetime import datetime as dt
 from datetime import timedelta
 from json import loads
+from json.decoder import JSONDecodeError
 
 import hdrh.histogram
 import hdrh.codec
@@ -40,6 +42,7 @@ from robot import errors
 
 from resources.libraries.python import jumpavg
 from input_data_files import download_and_unzip_data_file
+from pal_errors import PresentationError
 
 
 # Separator used in file names
@@ -213,6 +216,12 @@ class ExecutionChecker(ResultVisitor):
         r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
         r'PDR_UPPER:\s(\d+.\d+)'
     )
+    REGEX_NDRPDR_GBPS = re.compile(
+        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
+        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
+        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
+        r'PDR_UPPER:.*,\s(\d+.\d+)'
+    )
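
A minimal sketch of how the new REGEX_NDRPDR_GBPS is used; the message below is
a mock-up (the exact wording of real NDRPDR result lines is an assumption
here), but it shows that the pattern captures the float after the comma on each
bound line, i.e. the Gbps value, where REGEX_NDRPDR captures the leading pps
value:

    import re

    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )

    msg = (
        "NDR_LOWER: 12345678.0 pps, 6.17 Gbps\n"
        "LATENCY ...\n"
        "NDR_UPPER: 12500000.0 pps, 6.25 Gbps\n"
        "PDR_LOWER: 13000000.0 pps, 6.50 Gbps\n"
        "LATENCY ...\n"
        "PDR_UPPER: 13100000.0 pps, 6.55 Gbps"
    )
    groups = re.search(REGEX_NDRPDR_GBPS, msg)
    print([float(groups.group(i)) for i in range(1, 5)])
    # [6.17, 6.25, 6.5, 6.55]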
     REGEX_PERF_MSG_INFO = re.compile(
         r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
         r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
@@ -220,9 +229,17 @@ class ExecutionChecker(ResultVisitor):
         r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
         r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
     )
+    REGEX_CPS_MSG_INFO = re.compile(
+        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
+        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
+    )
+    REGEX_PPS_MSG_INFO = re.compile(
+        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
+        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
+    )
     REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
 
-    # TODO: Remove when not needed
+    # Needed for CPS and PPS tests
     REGEX_NDRPDR_LAT_BASE = re.compile(
         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
@@ -235,18 +252,7 @@ class ExecutionChecker(ResultVisitor):
         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
     )
-    # TODO: Remove when not needed
-    REGEX_NDRPDR_LAT_LONG = re.compile(
-        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
-        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
-        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
-        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
-        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
-        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
-        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
-        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
-        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
-    )
+
     REGEX_VERSION_VPP = re.compile(
         r"(return STDOUT Version:\s*|"
         r"VPP Version:\s*|VPP version:\s*)(.*)"
@@ -262,8 +268,7 @@ class ExecutionChecker(ResultVisitor):
         r'tx\s(\d*),\srx\s(\d*)'
     )
     REGEX_BMRR = re.compile(
-        r'Maximum Receive Rate trial results'
-        r' in packets per second: \[(.*)\]'
+        r'.*trial results.*: \[(.*)\]'
     )
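
The relaxed REGEX_BMRR accepts any trial-results line, not only the MRR
wording. A quick self-contained check (the second line is an assumed example of
a transaction-rate log, not a verbatim message):

    import re

    REGEX_BMRR = re.compile(r'.*trial results.*: \[(.*)\]')
    lines = (
        "Maximum Receive Rate trial results in packets per second: "
        "[1.0, 2.0]",
        "Maximum Transaction Rate trial results per second: [3.0]",
    )
    for line in lines:
        print(re.search(REGEX_BMRR, line).group(1))
    # 1.0, 2.0
    # 3.0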
     REGEX_RECONF_LOSS = re.compile(
         r'Packets lost due to reconfig: (\d*)'
@@ -368,33 +373,77 @@ class ExecutionChecker(ResultVisitor):
 
         groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
         if not groups or groups.lastindex != 1:
-            return msg
+            return u"Test Failed."
 
         try:
             data = groups.group(1).split(u", ")
         except (AttributeError, IndexError, ValueError, KeyError):
-            return msg
+            return u"Test Failed."
 
         out_str = u"["
         try:
             for item in data:
-                out_str += f"{float(item):.2f}, "
+                out_str += f"{(float(item) / 1e6):.2f}, "
             return out_str[:-2] + u"]"
         except (AttributeError, IndexError, ValueError, KeyError):
-            return msg
+            return u"Test Failed."
+
+    def _get_data_from_cps_test_msg(self, msg):
+        """Get info from message of NDRPDR CPS tests.
+
+        :param msg: Message to be processed.
+        :type msg: str
+        :returns: Processed message or "Test Failed." if a problem occurs.
+        :rtype: str
+        """
+
+        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
+        if not groups or groups.lastindex != 2:
+            return u"Test Failed."
+
+        try:
+            return (
+                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
+                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
+            )
+        except (AttributeError, IndexError, ValueError, KeyError):
+            return u"Test Failed."
+
+    def _get_data_from_pps_test_msg(self, msg):
+        """Get info from message of NDRPDR PPS tests.
+
+        :param msg: Message to be processed.
+        :type msg: str
+        :returns: Processed message or "Test Failed." if a problem occurs.
+        :rtype: str
+        """
+
+        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
+        if not groups or groups.lastindex != 4:
+            return u"Test Failed."
+
+        try:
+            return (
+                f"1. {(float(groups.group(1)) / 1e6):5.2f}      "
+                f"{float(groups.group(2)):5.2f}\n"
+                f"2. {(float(groups.group(3)) / 1e6):5.2f}      "
+                f"{float(groups.group(4)):5.2f}"
+            )
+        except (AttributeError, IndexError, ValueError, KeyError):
+            return u"Test Failed."
 
     def _get_data_from_perf_test_msg(self, msg):
         """Get info from message of NDRPDR performance tests.
 
         :param msg: Message to be processed.
         :type msg: str
-        :returns: Processed message or original message if a problem occurs.
+        :returns: Processed message or "Test Failed." if a problem occurs.
         :rtype: str
         """
 
         groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
         if not groups or groups.lastindex != 10:
-            return msg
+            return u"Test Failed."
 
         try:
             data = {
@@ -410,7 +459,7 @@ class ExecutionChecker(ResultVisitor):
                 u"pdr_lat_10_2": groups.group(10),
             }
         except (AttributeError, IndexError, ValueError, KeyError):
                 u"pdr_lat_10_2": groups.group(10),
             }
         except (AttributeError, IndexError, ValueError, KeyError):
-            return msg
+            return u"Test Failed."
 
         def _process_lat(in_str_1, in_str_2):
             """Extract min, avg, max values from latency string.
 
         def _process_lat(in_str_1, in_str_2):
             """Extract min, avg, max values from latency string.
@@ -421,65 +470,75 @@ class ExecutionChecker(ResultVisitor):
                 robot framework.
             :type in_str_1: str
             :type in_str_2: str
-            :returns: Processed latency string or empty string if a problem
-                occurs.
-            :rtype: tuple(str, str)
+            :returns: Six latency percentile values or None if a problem
+                occurs.
+            :rtype: tuple
             """
             in_list_1 = in_str_1.split('/', 3)
             in_list_2 = in_str_2.split('/', 3)
 
             if len(in_list_1) != 4 and len(in_list_2) != 4:
             """
             in_list_1 = in_str_1.split('/', 3)
             in_list_2 = in_str_2.split('/', 3)
 
             if len(in_list_1) != 4 and len(in_list_2) != 4:
-                return u""
+                return None
 
             in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
             try:
                 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
             except hdrh.codec.HdrLengthException:
-                return u""
+                return None
 
             in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
             try:
                 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
             except hdrh.codec.HdrLengthException:
-                return u""
+                return None
 
             if hdr_lat_1 and hdr_lat_2:
-                hdr_lat_1_50 = hdr_lat_1.get_value_at_percentile(50.0)
-                hdr_lat_1_90 = hdr_lat_1.get_value_at_percentile(90.0)
-                hdr_lat_1_99 = hdr_lat_1.get_value_at_percentile(99.0)
-                hdr_lat_2_50 = hdr_lat_2.get_value_at_percentile(50.0)
-                hdr_lat_2_90 = hdr_lat_2.get_value_at_percentile(90.0)
-                hdr_lat_2_99 = hdr_lat_2.get_value_at_percentile(99.0)
-
-                if (hdr_lat_1_50 + hdr_lat_1_90 + hdr_lat_1_99 +
-                        hdr_lat_2_50 + hdr_lat_2_90 + hdr_lat_2_99):
-                    return (
-                        f"{hdr_lat_1_50} {hdr_lat_1_90} {hdr_lat_1_99} , "
-                        f"{hdr_lat_2_50} {hdr_lat_2_90} {hdr_lat_2_99}"
-                    )
+                hdr_lat = (
+                    hdr_lat_1.get_value_at_percentile(50.0),
+                    hdr_lat_1.get_value_at_percentile(90.0),
+                    hdr_lat_1.get_value_at_percentile(99.0),
+                    hdr_lat_2.get_value_at_percentile(50.0),
+                    hdr_lat_2.get_value_at_percentile(90.0),
+                    hdr_lat_2.get_value_at_percentile(99.0)
+                )
 
-            return u""
+                if all(hdr_lat):
+                    return hdr_lat
 
-        try:
-            pdr_lat_10 = _process_lat(data[u'pdr_lat_10_1'],
-                                      data[u'pdr_lat_10_2'])
-            pdr_lat_50 = _process_lat(data[u'pdr_lat_50_1'],
-                                      data[u'pdr_lat_50_2'])
-            pdr_lat_90 = _process_lat(data[u'pdr_lat_90_1'],
-                                      data[u'pdr_lat_90_2'])
-            pdr_lat_10 = f"\n3. {pdr_lat_10}" if pdr_lat_10 else u""
-            pdr_lat_50 = f"\n4. {pdr_lat_50}" if pdr_lat_50 else u""
-            pdr_lat_90 = f"\n5. {pdr_lat_90}" if pdr_lat_90 else u""
+            return None
 
-            return (
-                f"1. {(data[u'ndr_low'] / 1e6):.2f} {data[u'ndr_low_b']:.2f}"
-                f"\n2. {(data[u'pdr_low'] / 1e6):.2f} {data[u'pdr_low_b']:.2f}"
-                f"{pdr_lat_10}"
-                f"{pdr_lat_50}"
-                f"{pdr_lat_90}"
+        try:
+            out_msg = (
+                f"1. {(data[u'ndr_low'] / 1e6):5.2f}      "
+                f"{data[u'ndr_low_b']:5.2f}"
+                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f}      "
+                f"{data[u'pdr_low_b']:5.2f}"
+            )
+            latency = (
+                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
+                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
+                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
             )
+            if all(latency):
+                max_len = len(str(max((max(item) for item in latency))))
+                max_len = 4 if max_len < 4 else max_len
+
+                for idx, lat in enumerate(latency):
+                    if not idx:
+                        out_msg += u"\n"
+                    out_msg += (
+                        f"\n{idx + 3}. "
+                        f"{lat[0]:{max_len}d} "
+                        f"{lat[1]:{max_len}d} "
+                        f"{lat[2]:{max_len}d}      "
+                        f"{lat[3]:{max_len}d} "
+                        f"{lat[4]:{max_len}d} "
+                        f"{lat[5]:{max_len}d} "
+                    )
+
+            return out_msg
+
         except (AttributeError, IndexError, ValueError, KeyError):
-            return msg
+            return u"Test Failed."
 
     def _get_testbed(self, msg):
         """Called when extraction of testbed IP is required.
 
     def _get_testbed(self, msg):
         """Called when extraction of testbed IP is required.
@@ -511,8 +570,8 @@ class ExecutionChecker(ResultVisitor):
         """
 
         if msg.message.count(u"return STDOUT Version:") or \
         """
 
         if msg.message.count(u"return STDOUT Version:") or \
-            msg.message.count(u"VPP Version:") or \
-            msg.message.count(u"VPP version:"):
+                msg.message.count(u"VPP Version:") or \
+                msg.message.count(u"VPP version:"):
             self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                 group(2))
             self._data[u"metadata"][u"version"] = self._version
@@ -700,6 +759,35 @@ class ExecutionChecker(ResultVisitor):
 
         return throughput, status
 
+    def _get_ndrpdr_throughput_gbps(self, msg):
+        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
+        test message.
+
+        :param msg: The test message to be parsed.
+        :type msg: str
+        :returns: Parsed data as a dict and the status (PASS/FAIL).
+        :rtype: tuple(dict, str)
+        """
+
+        gbps = {
+            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
+            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
+        }
+        status = u"FAIL"
+        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
+
+        if groups is not None:
+            try:
+                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
+                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
+                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
+                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
+                status = u"PASS"
+            except (IndexError, ValueError):
+                pass
+
+        return gbps, status
+
     def _get_plr_throughput(self, msg):
         """Get PLRsearch lower bound and PLRsearch upper bound from the test
         message.
@@ -768,10 +856,7 @@ class ExecutionChecker(ResultVisitor):
             },
         }
 
-        # TODO: Rewrite when long and base are not needed
-        groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
-        if groups is None:
-            groups = re.search(self.REGEX_NDRPDR_LAT, msg)
+        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
         if groups is None:
             groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
         if groups is None:
@@ -861,6 +946,40 @@ class ExecutionChecker(ResultVisitor):
 
         return latency, u"FAIL"
 
+    @staticmethod
+    def _get_hoststack_data(msg, tags):
+        """Get data from the hoststack test message.
+
+        :param msg: The test message to be parsed.
+        :param tags: Test tags.
+        :type msg: str
+        :type tags: list
+        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
+        :rtype: tuple(dict, str)
+        """
+        result = dict()
+        status = u"FAIL"
+
+        msg = msg.replace(u"'", u'"').replace(u" ", u"")
+        if u"LDPRELOAD" in tags:
+            try:
+                result = loads(msg)
+                status = u"PASS"
+            except JSONDecodeError:
+                pass
+        elif u"VPPECHO" in tags:
+            try:
+                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
+                result = dict(
+                    client=loads(msg_lst[0]),
+                    server=loads(msg_lst[1])
+                )
+                status = u"PASS"
+            except (JSONDecodeError, IndexError):
+                pass
+
+        return result, status
+
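
A minimal sketch of the quote normalisation in _get_hoststack_data (the key
names below are illustrative, not the real hoststack fields): Robot logs the
result as a Python-style dict, so single quotes must become double quotes
before json.loads() accepts it.

    from json import loads

    robot_msg = u"{'bits_per_second': 1000000, 'retransmits': 0}"
    print(loads(robot_msg.replace(u"'", u'"').replace(u" ", u"")))
    # {'bits_per_second': 1000000, 'retransmits': 0}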
     def visit_suite(self, suite):
         """Implements traversing through the suite and its direct children.
 
@@ -956,11 +1075,16 @@ class ExecutionChecker(ResultVisitor):
             name = test.name.lower()
 
         # Remove TC number from the TC long name (backward compatibility):
-        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
+        self._test_id = re.sub(
+            self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
+        )
         # Remove TC number from the TC name (not needed):
-        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
+        test_result[u"name"] = re.sub(
+            self.REGEX_TC_NUMBER, "", name.replace(u"snat", u"nat")
+        )
 
-        test_result[u"parent"] = test.parent.name.lower()
+        test_result[u"parent"] = test.parent.name.lower().\
+            replace(u"snat", u"nat")
         test_result[u"tags"] = tags
         test_result["doc"] = test.doc.\
             replace(u'"', u"'").\
         test_result[u"tags"] = tags
         test_result["doc"] = test.doc.\
             replace(u'"', u"'").\
@@ -968,13 +1092,33 @@ class ExecutionChecker(ResultVisitor):
             replace(u'\r', u'').\
             replace(u'[', u' |br| [').\
             replace(u' |br| [', u'[', 1)
-        test_result[u"msg"] = test.message.\
-            replace(u'\n', u' |br| ').\
-            replace(u'\r', u'').\
-            replace(u'"', u"'")
         test_result[u"type"] = u"FUNC"
         test_result[u"status"] = test.status
 
         test_result[u"type"] = u"FUNC"
         test_result[u"status"] = test.status
 
+        if test.status == u"PASS":
+            if u"NDRPDR" in tags:
+                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
+                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
+                        test.message).replace(u'\n', u' |br| '). \
+                        replace(u'\r', u'').replace(u'"', u"'")
+                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
+                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
+                        test.message).replace(u'\n', u' |br| '). \
+                        replace(u'\r', u'').replace(u'"', u"'")
+                else:
+                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
+                        test.message).replace(u'\n', u' |br| ').\
+                        replace(u'\r', u'').replace(u'"', u"'")
+            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
+                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
+                    test.message).replace(u'\n', u' |br| ').\
+                    replace(u'\r', u'').replace(u'"', u"'")
+            else:
+                test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
+                    replace(u'\r', u'').replace(u'"', u"'")
+        else:
+            test_result[u"msg"] = u"Test Failed."
+
         if u"PERFTEST" in tags:
             # Replace info about cores (e.g. -1c-) with the info about threads
             # and cores (e.g. -1t1c-) in the long test case names and in the
         if u"PERFTEST" in tags:
             # Replace info about cores (e.g. -1c-) with the info about threads
             # and cores (e.g. -1t1c-) in the long test case names and in the
@@ -990,14 +1134,14 @@ class ExecutionChecker(ResultVisitor):
                         tag_tc = tag
 
                 if tag_count == 1:
-                    self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
-                                           f"-{tag_tc.lower()}-",
-                                           self._test_id,
-                                           count=1)
-                    test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
-                                                  f"-{tag_tc.lower()}-",
-                                                  test_result["name"],
-                                                  count=1)
+                    self._test_id = re.sub(
+                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
+                        self._test_id, count=1
+                    )
+                    test_result[u"name"] = re.sub(
+                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
+                        test_result["name"], count=1
+                    )
                 else:
                     test_result[u"status"] = u"FAIL"
                     self._data[u"tests"][self._test_id] = test_result
@@ -1009,31 +1153,20 @@ class ExecutionChecker(ResultVisitor):
                     return
 
         if test.status == u"PASS":
-            if u"NDRPDR" in tags:
-                test_result[u"msg"] = self._get_data_from_perf_test_msg(
-                    test.message). \
-                    replace(u'\n', u' |br| '). \
-                    replace(u'\r', u''). \
-                    replace(u'"', u"'")
-                test_result[u"type"] = u"NDRPDR"
+            if u"DEVICETEST" in tags:
+                test_result[u"type"] = u"DEVICETEST"
+            elif u"NDRPDR" in tags:
+                if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
+                    test_result[u"type"] = u"CPS"
+                else:
+                    test_result[u"type"] = u"NDRPDR"
                 test_result[u"throughput"], test_result[u"status"] = \
                     self._get_ndrpdr_throughput(test.message)
                 test_result[u"throughput"], test_result[u"status"] = \
                     self._get_ndrpdr_throughput(test.message)
+                test_result[u"gbps"], test_result[u"status"] = \
+                    self._get_ndrpdr_throughput_gbps(test.message)
                 test_result[u"latency"], test_result[u"status"] = \
                     self._get_ndrpdr_latency(test.message)
                 test_result[u"latency"], test_result[u"status"] = \
                     self._get_ndrpdr_latency(test.message)
-            elif u"SOAK" in tags:
-                test_result[u"type"] = u"SOAK"
-                test_result[u"throughput"], test_result[u"status"] = \
-                    self._get_plr_throughput(test.message)
-            elif u"TCP" in tags:
-                test_result[u"type"] = u"TCP"
-                groups = re.search(self.REGEX_TCP, test.message)
-                test_result[u"result"] = int(groups.group(2))
             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
-                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
-                    test.message). \
-                    replace(u'\n', u' |br| '). \
-                    replace(u'\r', u''). \
-                    replace(u'"', u"'")
                 if u"MRR" in tags:
                     test_result[u"type"] = u"MRR"
                 else:
                 if u"MRR" in tags:
                     test_result[u"type"] = u"MRR"
                 else:
@@ -1043,15 +1176,30 @@ class ExecutionChecker(ResultVisitor):
                 groups = re.search(self.REGEX_BMRR, test.message)
                 if groups is not None:
                     items_str = groups.group(1)
-                    items_float = [float(item.strip()) for item
-                                   in items_str.split(",")]
+                    items_float = [
+                        float(item.strip().replace(u"'", u""))
+                        for item in items_str.split(",")
+                    ]
                     # Use whole list in CSIT-1180.
                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
                     test_result[u"result"][u"receive-rate"] = stats.avg
+                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                 else:
                     groups = re.search(self.REGEX_MRR, test.message)
                     test_result[u"result"][u"receive-rate"] = \
                         float(groups.group(3)) / float(groups.group(1))
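
A short sketch of the quote stripping added to the BMRR branch, assuming trial
results may arrive as quoted strings (e.g. "['1.0', '2.0']") rather than bare
floats:

    items_str = u"'1.0', '2.0', '3.0'"
    items_float = [
        float(item.strip().replace(u"'", u""))
        for item in items_str.split(u",")
    ]
    print(items_float)
    # [1.0, 2.0, 3.0]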
+            elif u"SOAK" in tags:
+                test_result[u"type"] = u"SOAK"
+                test_result[u"throughput"], test_result[u"status"] = \
+                    self._get_plr_throughput(test.message)
+            elif u"HOSTSTACK" in tags:
+                test_result[u"type"] = u"HOSTSTACK"
+                test_result[u"result"], test_result[u"status"] = \
+                    self._get_hoststack_data(test.message, tags)
+            elif u"TCP" in tags:
+                test_result[u"type"] = u"TCP"
+                groups = re.search(self.REGEX_TCP, test.message)
+                test_result[u"result"] = int(groups.group(2))
             elif u"RECONF" in tags:
                 test_result[u"type"] = u"RECONF"
                 test_result[u"result"] = None
             elif u"RECONF" in tags:
                 test_result[u"type"] = u"RECONF"
                 test_result[u"result"] = None
@@ -1064,8 +1212,6 @@ class ExecutionChecker(ResultVisitor):
                     }
                 except (AttributeError, IndexError, ValueError, TypeError):
                     test_result[u"status"] = u"FAIL"
-            elif u"DEVICETEST" in tags:
-                test_result[u"type"] = u"DEVICETEST"
             else:
                 test_result[u"status"] = u"FAIL"
                 self._data[u"tests"][self._test_id] = test_result
@@ -1138,11 +1284,10 @@ class ExecutionChecker(ResultVisitor):
         :returns: Nothing.
         """
         if test_kw.name.count(u"Show Runtime On All Duts") or \
-                test_kw.name.count(u"Show Runtime Counters On All Duts"):
+                test_kw.name.count(u"Show Runtime Counters On All Duts") or \
+                test_kw.name.count(u"Vpp Show Runtime On All Duts"):
             self._msg_type = u"test-show-runtime"
             self._sh_run_counter += 1
-        elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
-            self._msg_type = u"dpdk-version"
         else:
             return
         test_kw.messages.visit(self)
@@ -1179,6 +1324,9 @@ class ExecutionChecker(ResultVisitor):
         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                 and not self._version:
             self._msg_type = u"vpp-version"
+        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
+                not self._version:
+            self._msg_type = u"dpdk-version"
         elif setup_kw.name.count(u"Set Global Variable") \
                 and not self._timestamp:
             self._msg_type = u"timestamp"
@@ -1253,7 +1401,6 @@ class ExecutionChecker(ResultVisitor):
         :type msg: Message
         :returns: Nothing.
         """
-
         if self._msg_type:
             self.parse_msg[self._msg_type](msg)
 
@@ -1315,7 +1462,6 @@ class InputData:
         :returns: Metadata
         :rtype: pandas.Series
         """
-
         return self.data[job][build][u"metadata"]
 
     def suites(self, job, build):
@@ -1328,7 +1474,6 @@ class InputData:
         :returns: Suites.
         :rtype: pandas.Series
         """
-
         return self.data[job][str(build)][u"suites"]
 
     def tests(self, job, build):
@@ -1341,19 +1486,16 @@ class InputData:
         :returns: Tests.
         :rtype: pandas.Series
         """
-
         return self.data[job][build][u"tests"]
 
-    def _parse_tests(self, job, build, log):
+    def _parse_tests(self, job, build):
         """Process data from robot output.xml file and return JSON structured
         data.
 
         :param job: The name of job which build output data will be processed.
         :param build: The build which output data will be processed.
         """Process data from robot output.xml file and return JSON structured
         data.
 
         :param job: The name of job which build output data will be processed.
         :param build: The build which output data will be processed.
-        :param log: List of log messages.
         :type job: str
         :type build: dict
-        :type log: list of tuples (severity, msg)
         :returns: JSON data structure.
         :rtype: dict
         """
@@ -1367,9 +1509,8 @@ class InputData:
             try:
                 result = ExecutionResult(data_file)
             except errors.DataError as err:
-                log.append(
-                    (u"ERROR", f"Error occurred while parsing output.xml: "
-                               f"{repr(err)}")
+                logging.error(
+                    f"Error occurred while parsing output.xml: {repr(err)}"
                 )
                 return None
         checker = ExecutionChecker(metadata, self._cfg.mapping,
@@ -1394,40 +1535,30 @@ class InputData:
         :type repeat: int
         """
 
-        logs = list()
-
-        logs.append(
-            (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
-        )
+        logging.info(f"  Processing the job/build: {job}: {build[u'build']}")
 
         state = u"failed"
         success = False
         data = None
         do_repeat = repeat
         while do_repeat:
-            success = download_and_unzip_data_file(self._cfg, job, build, pid,
-                                                   logs)
+            success = download_and_unzip_data_file(self._cfg, job, build, pid)
             if success:
                 break
             do_repeat -= 1
         if not success:
-            logs.append(
-                (u"ERROR",
-                 f"It is not possible to download the input data file from the "
-                 f"job {job}, build {build[u'build']}, or it is damaged. "
-                 f"Skipped.")
+            logging.error(
+                f"It is not possible to download the input data file from the "
+                f"job {job}, build {build[u'build']}, or it is damaged. "
+                f"Skipped."
             )
         if success:
-            logs.append(
-                (u"INFO",
-                 f"    Processing data from the build {build[u'build']} ...")
-            )
-            data = self._parse_tests(job, build, logs)
+            logging.info(f"    Processing data from build {build[u'build']}")
+            data = self._parse_tests(job, build)
             if data is None:
-                logs.append(
-                    (u"ERROR",
-                     f"Input data file from the job {job}, build "
-                     f"{build[u'build']} is damaged. Skipped.")
+                logging.error(
+                    f"Input data file from the job {job}, build "
+                    f"{build[u'build']} is damaged. Skipped."
                 )
             else:
                 state = u"processed"
@@ -1435,13 +1566,13 @@ class InputData:
             try:
                 remove(build[u"file-name"])
             except OSError as err:
-                logs.append(
-                    ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
-                              f"{repr(err)}")
+                logging.error(
+                    f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
                 )
 
         # If the time-period is defined in the specification file, remove all
         # files which are outside the time period.
+        is_last = False
         timeperiod = self._cfg.input.get(u"time-period", None)
         if timeperiod and data:
             now = dt.utcnow()
@@ -1455,26 +1586,20 @@ class InputData:
                         # Remove the data and the file:
                         state = u"removed"
                         data = None
-                        logs.append(
-                            (u"INFO",
-                             f"    The build {job}/{build[u'build']} is "
-                             f"outdated, will be removed.")
+                        is_last = True
+                        logging.info(
+                            f"    The build {job}/{build[u'build']} is "
+                            f"outdated, will be removed."
                         )
                         )
-        logs.append((u"INFO", u"  Done."))
-
-        for level, line in logs:
-            if level == u"INFO":
-                logging.info(line)
-            elif level == u"ERROR":
-                logging.error(line)
-            elif level == u"DEBUG":
-                logging.debug(line)
-            elif level == u"CRITICAL":
-                logging.critical(line)
-            elif level == u"WARNING":
-                logging.warning(line)
-
-        return {u"data": data, u"state": state, u"job": job, u"build": build}
+        logging.info(u"  Done.")
+
+        return {
+            u"data": data,
+            u"state": state,
+            u"job": job,
+            u"build": build,
+            u"last": is_last
+        }
 
     def download_and_parse_data(self, repeat=1):
         """Download the input data files, parse input data from input files and
 
     def download_and_parse_data(self, repeat=1):
         """Download the input data files, parse input data from input files and
@@ -1491,6 +1616,8 @@ class InputData:
             for build in builds:
 
                 result = self._download_and_parse_build(job, build, repeat)
+                if result[u"last"]:
+                    break
                 build_nr = result[u"build"][u"build"]
 
                 if result[u"data"]:
@@ -1525,6 +1652,127 @@ class InputData:
 
         logging.info(u"Done.")
 
 
         logging.info(u"Done.")
 
+    def process_local_file(self, local_file, job=u"local", build_nr=1,
+                           replace=True):
+        """Process local XML file given as a command-line parameter.
+
+        :param local_file: The file to process.
+        :param job: Job name.
+        :param build_nr: Build number.
+        :param replace: If True, the information about jobs and builds is
+            replaced by the new one, otherwise the new jobs and builds are
+            added.
+        :type local_file: str
+        :type job: str
+        :type build_nr: int
+        :type replace: bool
+        :raises: PresentationError if an error occurs.
+        """
+        if not isfile(local_file):
+            raise PresentationError(f"The file {local_file} does not exist.")
+
+        try:
+            build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
+        except (IndexError, ValueError):
+            pass
+
+        build = {
+            u"build": build_nr,
+            u"status": u"failed",
+            u"file-name": local_file
+        }
+        if replace:
+            self._cfg.builds = dict()
+        self._cfg.add_build(job, build)
+
+        logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
+        data = self._parse_tests(job, build)
+        if data is None:
+            raise PresentationError(
+                f"Error occurred while parsing the file {local_file}"
+            )
+
+        build_data = pd.Series({
+            u"metadata": pd.Series(
+                list(data[u"metadata"].values()),
+                index=list(data[u"metadata"].keys())
+            ),
+            u"suites": pd.Series(
+                list(data[u"suites"].values()),
+                index=list(data[u"suites"].keys())
+            ),
+            u"tests": pd.Series(
+                list(data[u"tests"].values()),
+                index=list(data[u"tests"].keys())
+            )
+        })
+
+        if self._input_data.get(job, None) is None:
+            self._input_data[job] = pd.Series()
+        self._input_data[job][str(build_nr)] = build_data
+
+        self._cfg.set_input_state(job, build_nr, u"processed")
+
+    def process_local_directory(self, local_dir, replace=True):
+        """Process local directory with XML file(s). The directory is processed
+        as a 'job' and the XML files in it as builds.
+        If the given directory contains only sub-directories, these
+        sub-directories are processed as jobs and their XML files as builds
+        of the corresponding job.
+
+        :param local_dir: Local directory to process.
+        :param replace: If True, the information about jobs and builds is
+            replaced by the new one, otherwise the new jobs and builds are
+            added.
+        :type local_dir: str
+        :type replace: bool
+        """
+        if not isdir(local_dir):
+            raise PresentationError(
+                f"The directory {local_dir} does not exist."
+            )
+
+        # Check if the given directory includes only files or only directories
+        _, dirnames, filenames = next(walk(local_dir))
+
+        if filenames and not dirnames:
+            filenames.sort()
+            # local_builds:
+            # key: dir (job) name, value: list of file names (builds)
+            local_builds = {
+                local_dir: [join(local_dir, name) for name in filenames]
+            }
+
+        elif dirnames and not filenames:
+            dirnames.sort()
+            # local_builds:
+            # key: dir (job) name, value: list of file names (builds)
+            local_builds = dict()
+            for dirname in dirnames:
+                builds = [
+                    join(local_dir, dirname, name)
+                    for name in listdir(join(local_dir, dirname))
+                    if isfile(join(local_dir, dirname, name))
+                ]
+                if builds:
+                    local_builds[dirname] = sorted(builds)
+
+        elif not filenames and not dirnames:
+            raise PresentationError(f"The directory {local_dir} is empty.")
+        else:
+            raise PresentationError(
+                f"The directory {local_dir} can include only files or only "
+                f"directories, not both.\nThe directory {local_dir} includes "
+                f"file(s):\n{filenames}\nand directories:\n{dirnames}"
+            )
+
+        if replace:
+            self._cfg.builds = dict()
+
+        for job, files in local_builds.items():
+            for idx, local_file in enumerate(files):
+                self.process_local_file(local_file, job, idx + 1, replace=False)
+
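
For reference, the two directory layouts process_local_directory accepts (the
names are illustrative); mixing files and sub-directories in local_dir raises
PresentationError:

    # flat: local_dir is a single job, each XML file is one build
    local/
        1.xml
        2.xml

    # nested: each sub-directory is a job, its XML files are its builds
    local/
        job_a/
            1.xml
            2.xml
        job_b/
            1.xml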
     @staticmethod
     def _end_of_tag(tag_filter, start=0, closer=u"'"):
         """Return the index of character in the string which is the end of tag.
@@ -1538,7 +1786,6 @@ class InputData:
         :returns: The index of the tag closer.
         :rtype: int
         """
-
         try:
             idx_opener = tag_filter.index(closer, start)
             return tag_filter.index(closer, idx_opener + 1)
@@ -1554,7 +1801,6 @@ class InputData:
         :returns: Conditional statement which can be evaluated.
         :rtype: str
         """
-
         index = 0
         while True:
             index = InputData._end_of_tag(tag_filter, index)
@@ -1568,7 +1814,6 @@ class InputData:
         """Filter required data from the given jobs and builds.
 
         The output data structure is:
         """Filter required data from the given jobs and builds.
 
         The output data structure is:
-
         - job 1
           - build 1
             - test (or suite) 1 ID:
@@ -1671,7 +1916,6 @@ class InputData:
         """Filter required data from the given jobs and builds.
 
         The output data structure is:
         """Filter required data from the given jobs and builds.
 
         The output data structure is:
-
         - job 1
           - build 1
             - test (or suite) 1 ID:
@@ -1741,9 +1985,10 @@ class InputData:
                                                 data[job][str(build)][
                                                     test_id][param] = u"No Data"
                         except KeyError as err:
-                            logging.error(repr(err))
                             if continue_on_error:
+                                logging.debug(repr(err))
                                 continue
+                            logging.error(repr(err))
                             return None
             return data
 
@@ -1785,7 +2030,6 @@ class InputData:
             for item in builds.values:
                 for item_id, item_data in item.items():
                     merged_data[item_id] = item_data
-
         return merged_data
 
     def print_all_oper_data(self):