# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
import multiprocessing
import os
import re
import logging
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
from os import remove

from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory

from input_data_files import download_and_unzip_data_file
from utils import Worker
class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "PDR" | "NDR" | "TCP" | "MRR" | "BMRR",
                "throughput": {  # Only type: "PDR" | "NDR"
                    "value": int,
                    "unit": "pps" | "bps" | "percentage"
                },
                "latency": {  # Only type: "PDR" | "NDR"
                    "direction1": {
                        "100": {"min": int, "avg": int, "max": int},
                        "50": {...},  # Only for NDR
                        "10": {...}   # Only for NDR
                    },
                    "direction2": {
                        "100": {"min": int, "avg": int, "max": int},
                        "50": {...},  # Only for NDR
                        "10": {...}   # Only for NDR
                    }
                },
                "result": {  # Only type: "TCP"
                    "value": int,
                    "unit": "cps" | "rps"
                },
                "result": {  # Only type: "MRR" | "BMRR"
                    "receive-rate": AvgStdevMetadata
                },
                "lossTolerance": "lossTolerance",  # Only type: "PDR"
                "vat-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run"
            }
        }
    }

    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "vat-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
                                 r'[\D\d]*')

    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*)(.*)")

    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
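
    # Doctest-style illustration of the regexes above; the sample messages are
    # made up, but follow the format the parser expects:
    #
    #     >>> import re
    #     >>> re.search(ExecutionChecker.REGEX_RATE,
    #     ...           "FINAL_RATE: 12345678.0 pps").groups()
    #     ('12345678.0', 'pps')
    #     >>> re.search(ExecutionChecker.REGEX_MRR,
    #     ...           "MaxReceivedRate_Results [pkts/10sec]: "
    #     ...           "tx 1000, rx 900").groups()
    #     ('10', '1000', '900')
    #     >>> re.search(ExecutionChecker.REGEX_TCP,
    #     ...           "Total cps: 12345").groups()
    #     ('cps', '12345')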
    def __init__(self, metadata):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            the generated JSON structure.
        :type metadata: dict
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # SUT version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Number of VAT History messages found:
        # 0 - no message
        # 1 - VAT History of DUT1
        # 2 - VAT History of DUT2
        self._lookup_kw_nr = 0
        self._vat_history_lookup_nr = 0

        # Number of Show Running messages found
        # 0 - no message
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test - the lowercase full path to the
        # test
        self._test_ID = None

        # The main data structure
        self._data = {
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "test-show-runtime": self._get_show_run
        }
267 """Getter - Data parsed from the XML file.
269 :returns: Data parsed from the XML file.
    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:"):
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                                group(2))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None
    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT testpmd"):
            self._version = str(re.search(
                self.REGEX_VERSION_DPDK, msg.message).group(4))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None
    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None
    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("VAT command history:"):
            self._vat_history_lookup_nr += 1
            if self._vat_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["vat-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} "
                          r"VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["vat-history"] += " |br| "
            self._data["tests"][self._test_ID]["vat-history"] += \
                "**DUT" + str(self._vat_history_lookup_nr) + ":** " + text
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI
        command Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Thread "):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")

                self._data["tests"][self._test_ID]["show-run"] += " |br| "
                self._data["tests"][self._test_ID]["show-run"] += \
                    "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)
        else:
            return None

        latencies = list()
        for idx in groups_range:
            try:
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
                lat = [-1, -1, -1]
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency = {
            "direction1": {
            },
            "direction2": {
            }
        }

        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))

        return latency
    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)
    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        self._data["suites"][suite.longname.lower().replace('"', "'").
                             replace(" ", "_")] = {
                                 "name": suite.name.lower(),
                                 "doc": doc_str,
                                 "parent": parent_name,
                                 "level": len(suite.longname.split("."))
                             }

        suite.keywords.visit(self)
    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        pass
    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)
    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        tags = [str(tag) for tag in test.tags]
        test_result = dict()
        test_result["name"] = test.name.lower()
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "",
                                     test_result["name"])
        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["status"] = test.status
        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", test.longname.lower())
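
        # Illustrative effect of the two substitutions above (the test name is
        # made up): "tc01-64b-1t1c-eth-l2xcbase-ndrpdrdisc" becomes
        # "64b-1t1c-eth-l2xcbase-ndrpdrdisc", both in the test name and in the
        # lowercase long name used as the test ID.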
        if test.status == "PASS" and ("NDRPDRDISC" in tags or
                                      "TCP" in tags or
                                      "MRR" in tags or
                                      "FRMOBL" in tags or
                                      "BMRR" in tags):
            if "NDRDISC" in tags:
                test_type = "NDR"
            elif "PDRDISC" in tags:
                test_type = "PDR"
            elif "TCP" in tags:
                test_type = "TCP"
            elif "MRR" in tags:
                test_type = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_type = "BMRR"
            else:
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result
                return

            test_result["type"] = test_type
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            if not groups:
                tag_count = 0
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                    if groups:
                        tag_count += 1
                        tag_tc = tag

                if tag_count == 1:
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                                           self._test_ID,
                                           count=1)
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
                                                 test_result["name"],
                                                 count=1)
                else:
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.error("The test '{0}' has none or more than one "
                                  "multi-threading tag.".format(self._test_ID))
                    return
            if test_type in ("NDR", "PDR"):
                try:
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_value = "-1"
                try:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:
                    rate_unit = "-1"

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_type)
                if test_type == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_type in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = dict()
                test_result["result"]["value"] = int(groups.group(2))
                test_result["result"]["unit"] = groups.group(1)

            elif test_type in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data(items_float)
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result
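
        # Illustrative resulting entry for a BMRR test (values are made up):
        #
        #     self._data["tests"]["...64b-1t1c-eth-l2xcbase-mrr"] = {
        #         "name": "64b-1t1c-eth-l2xcbase-mrr",
        #         "tags": ["BMRR", "1T1C", ...],
        #         "type": "BMRR",
        #         "result": {"receive-rate": <AvgStdevMetadata>},
        #         "status": "PASS",
        #         ...
        #     }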
    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        pass
    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)
    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            else:
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
            pass
    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        pass
    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)
    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Start The L2fwd Test") and not self._version:
            self._msg_type = "dpdk-version"

        if self._msg_type:
            test_kw.messages.visit(self)
    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        pass
    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)
    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        elif setup_kw.name.count("Setup performance global Variables") \
                and not self._timestamp:
            self._msg_type = "timestamp"

        if self._msg_type:
            setup_kw.messages.visit(self)
    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        pass
    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)
    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._vat_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        pass
    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)
    def start_message(self, msg):
        """Called when message starts. Get required information from the
        message using the handler registered for the current message type.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)
    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        pass


class InputData(object):
    """Input data

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:

    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """
    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """

        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()
818 """Getter - Input data.
821 :rtype: pandas.Series
823 return self._input_data
    def metadata(self, job, build):
        """Getter - metadata.

        :param job: Job whose metadata we want.
        :param build: Build whose metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata.
        :rtype: pandas.Series
        """

        return self.data[job][build]["metadata"]
    def suites(self, job, build):
        """Getter - suites.

        :param job: Job whose suites we want.
        :param build: Build whose suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]
    def tests(self, job, build):
        """Getter - tests.

        :param job: Job whose tests we want.
        :param build: Build whose tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """

        return self.data[job][build]["tests"]
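
    # Illustrative usage of the getters above (the job and build values are
    # hypothetical):
    #
    #     in_data = InputData(spec)
    #     in_data.download_and_parse_data()
    #     meta = in_data.metadata("csit-vpp-perf-check-master", "123")
    #     tests = in_data.tests("csit-vpp-perf-check-master", "123")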
    @staticmethod
    def _parse_tests(job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of the job whose build output data will be
            processed.
        :param build: The build whose output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        metadata = {
            "job": job,
            "build": build
        }

        with open(build["file-name"], 'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
                                     "{0}".format(err)))
                return None
        checker = ExecutionChecker(metadata)
        result.visit(checker)

        return checker.data
    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param data_queue: Shared memory between processes. Queue which keeps
            the result data. This data is then read by the main process and
            used in further processing.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type data_queue: multiprocessing.Manager().Queue()
        :type job: str
        :type build: dict
        :type repeat: int
        """

        logs = list()

        logging.info(" Processing the job/build: {0}: {1}".
                     format(job, build["build"]))
        logs.append(("INFO", " Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        state = "failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                                   logs)
            if success:
                break
            do_repeat -= 1
        if not success:
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
        else:
            logs.append(("INFO", " Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = InputData._parse_tests(job, build, logs)
            if data is None:
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            else:
                state = "processed"

            try:
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], err)))

        logs.append(("INFO", " Done."))

        result = {
            "job": job,
            "build": build,
            "data": data,
            "state": state,
            "logs": logs
        }
        data_queue.put(result)
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        work_queue = multiprocessing.JoinableQueue()
        manager = multiprocessing.Manager()
        data_queue = manager.Queue()
        cpus = multiprocessing.cpu_count()

        workers = list()
        for cpu in range(cpus):
            worker = Worker(work_queue,
                            data_queue,
                            self._download_and_parse_build)
            worker.daemon = True
            worker.start()
            workers.append(worker)
            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                      format(cpu, worker.pid))

        for job, builds in self._cfg.builds.items():
            for build in builds:
                work_queue.put((job, build, repeat))

        work_queue.join()

        logging.info("Done.")
        while not data_queue.empty():
            result = data_queue.get()

            job = result["job"]
            build_nr = result["build"]["build"]

            if result["data"]:
                data = result["data"]
                build_data = pd.Series({
                    "metadata": pd.Series(data["metadata"].values(),
                                          index=data["metadata"].keys()),
                    "suites": pd.Series(data["suites"].values(),
                                        index=data["suites"].keys()),
                    "tests": pd.Series(data["tests"].values(),
                                       index=data["tests"].keys())})
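
                # Resulting per-build structure (illustrative):
                #
                #     build_data["metadata"]["version"] -> SUT version string
                #     build_data["suites"][suite_id]    -> suite info dict
                #     build_data["tests"][test_id]      -> test results dict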
                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(job, build_nr,
                                              result["build"]["file-name"])

            self._cfg.set_input_state(job, build_nr, result["state"])

            for item in result["logs"]:
                if item[0] == "INFO":
                    logging.info(item[1])
                elif item[0] == "ERROR":
                    logging.error(item[1])
                elif item[0] == "DEBUG":
                    logging.debug(item[1])
                elif item[0] == "CRITICAL":
                    logging.critical(item[1])
                elif item[0] == "WARNING":
                    logging.warning(item[1])
        # Terminate all workers
        for worker in workers:
            worker.terminate()
            worker.join()

        logging.info("Done.")
    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the search is started.
        :param closer: The character which closes the tag.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """

        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None
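
    # Illustrative behaviour (based on the code above): the returned index is
    # that of the closing apostrophe of the first tag found at or after
    # `start`:
    #
    #     >>> InputData._end_of_tag("'1T1C' and '64B'")
    #     5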
    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """

        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
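
    # Illustrative transformation (the tags are made up); the returned string
    # is later evaluated against each test's tag list in filter_data():
    #
    #     >>> InputData._condition("'1T1C' and '64B'")
    #     "'1T1C' in tags and '64B' in tags"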
    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """
        try:
            if element["filter"] in ("all", "template"):
                cond = "True"
            else:
                cond = InputData._condition(element["filter"])
            logging.debug(" Filter: {0}".format(cond))
        except KeyError:
            logging.error(" No filter defined.")
            return None

        if params is None:
            params = element.get("parameters", None)

        data = pd.Series()
        try:
            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_iter = self.data[job][str(build)][data_set].\
                            iteritems()
                    except KeyError:
                        if continue_on_error:
                            continue
                        return None
                    for test_ID, test_data in data_iter:
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_ID][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_ID][param] =\
                                            "No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(" Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
            return None
        except AttributeError:
            return None
        except SyntaxError:
            logging.error(" The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
            return None
    @staticmethod
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:

        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """

        logging.info("    Merging data ...")

        merged_data = pd.Series()
        for _, builds in data.iteritems():
            for _, item in builds.iteritems():
                for ID, item_data in item.iteritems():
                    merged_data[ID] = item_data

        return merged_data
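
    # Illustrative pipeline combining filter_data() and merge_data() (the
    # element specification is hypothetical):
    #
    #     filtered = in_data.filter_data(element, data_set="tests")
    #     if filtered is not None:
    #         merged = InputData.merge_data(filtered)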