REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
- REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
+ REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')
REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
r'tx\s(\d*),\srx\s(\d*)')
REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
r' in packets per second: \[(.*)\]')
+ REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
+ REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*\.[\de-]*)')
+
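For context, the two new RECONF patterns capture the packet-loss count and the implied time lost from the test message. A minimal sketch of what they match; the sample message wording is inferred from the patterns themselves and is an assumption:

```python
import re

REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*\.[\de-]*)')

# Illustrative message; the exact surrounding text is assumed.
msg = "Packets lost due to reconfig: 12345\nImplied time lost: 1.234e-1"

print(int(re.search(REGEX_RECONF_LOSS, msg).group(1)))    # 12345
print(float(re.search(REGEX_RECONF_TIME, msg).group(1)))  # 0.1234
```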
REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
- REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
+ REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
def __init__(self, metadata, mapping, ignore):
"""Initialisation.
self._data["tests"][self._test_ID]["conf-history"] = str()
else:
self._msg_type = None
- text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
+ text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
"VAT command history:", "", msg.message, count=1). \
replace("\n\n", "\n").replace('\n', ' |br| ').\
replace('\r', '').replace('"', "'")
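The substitution above strips a leading "<ip-address> VAT command history:" header and normalises the remainder for reStructuredText rendering. A minimal sketch with an illustrative message:

```python
import re

msg = '10.0.0.1 VAT command history:\nsw_interface_dump\n"verbose"'
text = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} VAT command history:",
              "", msg, count=1).\
    replace("\n\n", "\n").replace('\n', ' |br| ').\
    replace('\r', '').replace('"', "'")
print(text)  # " |br| sw_interface_dump |br| 'verbose'"
```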
self._data["tests"][self._test_ID]["conf-history"] = str()
else:
self._msg_type = None
- text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
+ text = re.sub("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
"PAPI command history:", "", msg.message, count=1). \
replace("\n\n", "\n").replace('\n', ' |br| ').\
replace('\r', '').replace('"', "'")
self._data["tests"][self._test_ID]["show-run"] = str()
if self._lookup_kw_nr > 1:
self._msg_type = None
- if self._show_run_lookup_nr == 1:
+ if self._show_run_lookup_nr > 0:
message = str(msg.message).replace(' ', '').replace('\n', '').\
replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
runtime = loads(message)
try:
self._data["tests"][self._test_ID]["show-run"] += " |br| "
self._data["tests"][self._test_ID]["show-run"] += \
- "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
+ "**DUT" + str(self._show_run_lookup_nr) + ":** |br| " \
+ + text
except KeyError:
pass
groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
if not groups:
tag_count = 0
+ tag_tc = str()
for tag in test_result["tags"]:
groups = re.search(self.REGEX_TC_TAG, tag)
if groups:
"SOAK" in tags or
"TCP" in tags or
"MRR" in tags or
- "BMRR" in tags):
+ "BMRR" in tags or
+ "RECONF" in tags):
# TODO: Remove when definitely no NDRPDRDISC tests are used:
if "NDRDISC" in tags:
test_result["type"] = "NDR"
test_result["type"] = "MRR"
elif "FRMOBL" in tags or "BMRR" in tags:
test_result["type"] = "BMRR"
+ elif "RECONF" in tags:
+ test_result["type"] = "RECONF"
else:
test_result["status"] = "FAIL"
self._data["tests"][self._test_ID] = test_result
AvgStdevMetadataFactory.from_data([
float(groups.group(3)) / float(groups.group(1)), ])
+ elif test_result["type"] == "RECONF":
+ test_result["result"] = None
+ try:
+ grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
+ grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
+ test_result["result"] = {
+ "loss": int(grps_loss.group(1)),
+ "time": float(grps_time.group(1))
+ }
+ except (AttributeError, IndexError, ValueError, TypeError):
+ test_result["status"] = "FAIL"
+
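Note that the except clause above also covers the no-match case: re.search() returns None, so the subsequent .group(1) raises AttributeError and the test is marked "FAIL". A minimal sketch of that failure path (message text illustrative):

```python
import re

REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')

try:
    loss = int(re.search(REGEX_RECONF_LOSS, "no reconf data").group(1))
except (AttributeError, IndexError, ValueError, TypeError):
    # re.search() returned None, so .group(1) raised AttributeError;
    # the parser sets test_result["status"] = "FAIL" in this branch.
    print("status -> FAIL")
```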
self._data["tests"][self._test_ID] = test_result
def end_test(self, test):
index += 1
tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
- def filter_data(self, element, params=None, data_set="tests",
+ def filter_data(self, element, params=None, data=None, data_set="tests",
continue_on_error=False):
"""Filter required data from the given jobs and builds.
:param element: Element which will use the filtered data.
:param params: Parameters which will be included in the output. If None,
- all parameters are included.
+ all parameters are included.
+ :param data: If not None, this data is used instead of the data
+ specified in the element.
:param data_set: The set of data to be filtered: tests, suites,
- metadata.
+ metadata.
:param continue_on_error: Continue if there is error while reading the
- data. The Item will be empty then
+ data. The item will be empty then.
:type element: pandas.Series
:type params: list
+ :type data: dict
:type data_set: str
:type continue_on_error: bool
:returns: Filtered data.
if params:
params.append("type")
+ data_to_filter = data if data else element["data"]
data = pd.Series()
try:
- for job, builds in element["data"].items():
+ for job, builds in data_to_filter.items():
data[job] = pd.Series()
for build in builds:
data[job][str(build)] = pd.Series()
"tags are enclosed by apostrophes.".format(cond))
return None
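With the new signature, callers can hand filter_data() an explicit job/build mapping instead of the one embedded in the element. A minimal sketch, assuming an InputData-like instance named input_data; the element contents are illustrative (in the real specification the element is a pandas.Series):

```python
element = {
    "data": {"job-a": [1, 2]},           # used when data is None
    "parameters": ["name", "result"],
}

# Default: filter the jobs/builds listed in the element itself.
filtered = input_data.filter_data(element)

# Override: filter a different job/build mapping, same element settings.
filtered = input_data.filter_data(element, data={"job-b": [7]})
```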
+ def filter_tests_by_name(self, element, params=None, data_set="tests",
+ continue_on_error=False):
+ """Filter required data from the given jobs and builds.
+
+ The output data structure is:
+
+ - job 1
+ - build 1
+ - test (or suite) 1 ID:
+ - param 1
+ - param 2
+ ...
+ - param n
+ ...
+ - test (or suite) n ID:
+ ...
+ ...
+ - build n
+ ...
+ - job n
+
+ :param element: Element which will use the filtered data.
+ :param params: Parameters which will be included in the output. If None,
+ all parameters are included.
+ :param data_set: The set of data to be filtered: tests, suites,
+ metadata.
+ :param continue_on_error: Continue if there is an error while reading the
+ data. The item will be empty then.
+ :type element: pandas.Series
+ :type params: list
+ :type data_set: str
+ :type continue_on_error: bool
+ :returns: Filtered data.
+ :rtype: pandas.Series
+ """
+
+ include = element.get("include", None)
+ if not include:
+ logging.warning("No tests to include, skipping the element.")
+ return None
+
+ if params is None:
+ params = element.get("parameters", None)
+ if params:
+ params.append("type")
+
+ data = pd.Series()
+ try:
+ for job, builds in element["data"].items():
+ data[job] = pd.Series()
+ for build in builds:
+ data[job][str(build)] = pd.Series()
+ for test in include:
+ try:
+ reg_ex = re.compile(str(test).lower())
+ for test_ID in self.data[job][str(build)]\
+ [data_set].keys():
+ if re.match(reg_ex, str(test_ID).lower()):
+ test_data = self.data[job][str(build)]\
+ [data_set][test_ID]
+ data[job][str(build)][test_ID] = pd.Series()
+ if params is None:
+ for param, val in test_data.items():
+ data[job][str(build)][test_ID]\
+ [param] = val
+ else:
+ for param in params:
+ try:
+ data[job][str(build)][test_ID]\
+ [param] = test_data[param]
+ except KeyError:
+ data[job][str(build)][test_ID]\
+ [param] = "No Data"
+ except KeyError as err:
+ logging.error("{err!r}".format(err=err))
+ if continue_on_error:
+ continue
+ else:
+ return None
+ return data
+
+ except (KeyError, IndexError, ValueError) as err:
+ logging.error("Missing mandatory parameter in the element "
+ "specification: {err!r}".format(err=err))
+ return None
+ except AttributeError as err:
+ logging.error("{err!r}".format(err=err))
+ return None
+
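For completeness, a hypothetical usage of the new method: the element's "include" list holds regular expressions that are lower-cased, compiled, and matched against each test ID with re.match(). The element contents and the instance name below are illustrative, not taken from the specification:

```python
element = {
    "data": {"job-a": [1]},
    "include": [
        # Matched case-insensitively against the test IDs.
        r".*ethip4-ip4base.*mrr",
    ],
}

data = input_data.filter_tests_by_name(element, params=["name", "result"])
if data is None:
    # Returned (and logged) when "include" is missing or empty,
    # or when a mandatory key such as "data" is absent.
    pass
```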
+
@staticmethod
def merge_data(data):
"""Merge data from more jobs and builds to a simple data structure.