1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
   pandas' Series,
18 - provide access to the data,
19 - filter the data using tags.
22 import multiprocessing
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
35 from input_data_files import download_and_unzip_data_file
36 from utils import Worker
39 class ExecutionChecker(ResultVisitor):
40 """Class to traverse through the test suite structure.
42 The functionality implemented in this class generates a json structure:
48 "generated": "Timestamp",
49 "version": "SUT version",
50 "job": "Jenkins job name",
51 "build": "Information about the build"
54 "Suite long name 1": {
56 "doc": "Suite 1 documentation",
57 "parent": "Suite 1 parent",
58 "level": "Level of the suite in the suite hierarchy"
60 "Suite long name N": {
62 "doc": "Suite N documentation",
63 "parent": "Suite 2 parent",
64 "level": "Level of the suite in the suite hierarchy"
70 "parent": "Name of the parent of the test",
71 "doc": "Test documentation"
73 "tags": ["tag 1", "tag 2", "tag n"],
74 "type": "PDR" | "NDR" | "TCP" | "MRR" | "BMRR",
75 "throughput": { # Only type: "PDR" | "NDR"
77 "unit": "pps" | "bps" | "percentage"
79 "latency": { # Only type: "PDR" | "NDR"
86 "50": { # Only for NDR
91 "10": { # Only for NDR
103 "50": { # Only for NDR
108 "10": { # Only for NDR
115 "result": { # Only type: "TCP"
117 "unit": "cps" | "rps"
119 "result": { # Only type: "MRR" | "BMRR"
120 "receive-rate": AvgStdevMetadata,
122 "lossTolerance": "lossTolerance", # Only type: "PDR"
123 "vat-history": "DUT1 and DUT2 VAT History"
124 "show-run": "Show Run"
136 "metadata": { # Optional
137 "version": "VPP version",
138 "job": "Jenkins job name",
139 "build": "Information about the build"
143 "doc": "Suite 1 documentation",
144 "parent": "Suite 1 parent",
145 "level": "Level of the suite in the suite hierarchy"
148 "doc": "Suite N documentation",
149 "parent": "Suite 2 parent",
150 "level": "Level of the suite in the suite hierarchy"
156 "parent": "Name of the parent of the test",
157 "doc": "Test documentation"
158 "msg": "Test message"
159 "tags": ["tag 1", "tag 2", "tag n"],
160 "vat-history": "DUT1 and DUT2 VAT History"
161 "show-run": "Show Run"
162 "status": "PASS" | "FAIL"
170 .. note:: ID is the lowercase full path to the test.
173 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
175 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
176 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
177 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
178 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
179 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
180 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
181 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
183 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
184 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
185 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
187 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
190 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*)(.*)")
192 REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
193 r"(RTE Version: 'DPDK )(.*)(')")
195 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
197 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
198 r'tx\s(\d*),\srx\s(\d*)')
200 REGEX_BMRR = re.compile(r'Maximum Receive Rate Results \[(.*)\]')
202 def __init__(self, metadata):
205 :param metadata: Key-value pairs to be included in "metadata" part of
210 # Type of message to parse out from the test messages
211 self._msg_type = None
217 self._timestamp = None
219 # Number of VAT History messages found:
221 # 1 - VAT History of DUT1
222 # 2 - VAT History of DUT2
223 self._lookup_kw_nr = 0
224 self._vat_history_lookup_nr = 0
226 # Number of Show Running messages found
228 # 1 - Show run message found
229 self._show_run_lookup_nr = 0
231 # Test ID of currently processed test- the lowercase full path to the
235 # The main data structure
237 "metadata": OrderedDict(),
238 "suites": OrderedDict(),
239 "tests": OrderedDict()
242 # Save the provided metadata
243 for key, val in metadata.items():
244 self._data["metadata"][key] = val
246 # Dictionary defining the methods used to parse different types of
249 "timestamp": self._get_timestamp,
250 "vpp-version": self._get_vpp_version,
251 "dpdk-version": self._get_dpdk_version,
252 "teardown-vat-history": self._get_vat_history,
253 "test-show-runtime": self._get_show_run
258 """Getter - Data parsed from the XML file.
260 :returns: Data parsed from the XML file.
265 def _get_vpp_version(self, msg):
266 """Called when extraction of VPP version is required.
268 :param msg: Message to process.
273 if msg.message.count("return STDOUT Version:"):
274 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
276 self._data["metadata"]["version"] = self._version
277 self._msg_type = None
279 def _get_dpdk_version(self, msg):
280 """Called when extraction of DPDK version is required.
282 :param msg: Message to process.
287 if msg.message.count("return STDOUT testpmd"):
289 self._version = str(re.search(
290 self.REGEX_VERSION_DPDK, msg.message). group(4))
291 self._data["metadata"]["version"] = self._version
295 self._msg_type = None
297 def _get_timestamp(self, msg):
298 """Called when extraction of timestamp is required.
300 :param msg: Message to process.
305 self._timestamp = msg.timestamp[:14]
306 self._data["metadata"]["generated"] = self._timestamp
307 self._msg_type = None
309 def _get_vat_history(self, msg):
310 """Called when extraction of VAT command history is required.
312 :param msg: Message to process.
316 if msg.message.count("VAT command history:"):
317 self._vat_history_lookup_nr += 1
318 if self._vat_history_lookup_nr == 1:
319 self._data["tests"][self._test_ID]["vat-history"] = str()
321 self._msg_type = None
322 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
323 "VAT command history:", "", msg.message, count=1). \
324 replace("\n\n", "\n").replace('\n', ' |br| ').\
325 replace('\r', '').replace('"', "'")
327 self._data["tests"][self._test_ID]["vat-history"] += " |br| "
328 self._data["tests"][self._test_ID]["vat-history"] += \
329 "**DUT" + str(self._vat_history_lookup_nr) + ":** " + text
331 def _get_show_run(self, msg):
332 """Called when extraction of VPP operational data (output of CLI command
333 Show Runtime) is required.
335 :param msg: Message to process.
339 if msg.message.count("return STDOUT Thread "):
340 self._show_run_lookup_nr += 1
341 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
342 self._data["tests"][self._test_ID]["show-run"] = str()
343 if self._lookup_kw_nr > 1:
344 self._msg_type = None
345 if self._show_run_lookup_nr == 1:
346 text = msg.message.replace("vat# ", "").\
347 replace("return STDOUT ", "").replace("\n\n", "\n").\
348 replace('\n', ' |br| ').\
349 replace('\r', '').replace('"', "'")
351 self._data["tests"][self._test_ID]["show-run"] += " |br| "
352 self._data["tests"][self._test_ID]["show-run"] += \
353 "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
357 def _get_latency(self, msg, test_type):
358 """Get the latency data from the test message.
360 :param msg: Message to be parsed.
361 :param test_type: Type of the test - NDR or PDR.
364 :returns: Latencies parsed from the message.
368 if test_type == "NDR":
369 groups = re.search(self.REGEX_LAT_NDR, msg)
370 groups_range = range(1, 7)
371 elif test_type == "PDR":
372 groups = re.search(self.REGEX_LAT_PDR, msg)
373 groups_range = range(1, 3)
378 for idx in groups_range:
380 lat = [int(item) for item in str(groups.group(idx)).split('/')]
381 except (AttributeError, ValueError):
383 latencies.append(lat)
385 keys = ("min", "avg", "max")
393 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
394 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
395 if test_type == "NDR":
396 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
397 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
398 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
399 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
403 def visit_suite(self, suite):
404 """Implements traversing through the suite and its direct children.
406 :param suite: Suite to process.
410 if self.start_suite(suite) is not False:
411 suite.suites.visit(self)
412 suite.tests.visit(self)
413 self.end_suite(suite)
415 def start_suite(self, suite):
416 """Called when suite starts.
418 :param suite: Suite to process.
424 parent_name = suite.parent.name
425 except AttributeError:
428 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
429 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
430 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
432 self._data["suites"][suite.longname.lower().replace('"', "'").
433 replace(" ", "_")] = {
434 "name": suite.name.lower(),
436 "parent": parent_name,
437 "level": len(suite.longname.split("."))
440 suite.keywords.visit(self)
442 def end_suite(self, suite):
443 """Called when suite ends.
445 :param suite: Suite to process.
451 def visit_test(self, test):
452 """Implements traversing through the test.
454 :param test: Test to process.
458 if self.start_test(test) is not False:
459 test.keywords.visit(self)
462 def start_test(self, test):
463 """Called when test starts.
465 :param test: Test to process.
470 tags = [str(tag) for tag in test.tags]
472 test_result["name"] = test.name.lower()
473 test_result["parent"] = test.parent.name.lower()
474 test_result["tags"] = tags
475 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
476 replace('\r', '').replace('[', ' |br| [')
477 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
478 test_result["msg"] = test.message.replace('\n', ' |br| '). \
479 replace('\r', '').replace('"', "'")
480 test_result["status"] = test.status
481 if test.status == "PASS" and ("NDRPDRDISC" in tags or
485 if "NDRDISC" in tags:
487 elif "PDRDISC" in tags:
493 elif "FRMOBL" in tags or "BMRR" in tags:
498 test_result["type"] = test_type
500 if test_type in ("NDR", "PDR"):
502 rate_value = str(re.search(
503 self.REGEX_RATE, test.message).group(1))
504 except AttributeError:
507 rate_unit = str(re.search(
508 self.REGEX_RATE, test.message).group(2))
509 except AttributeError:
512 test_result["throughput"] = dict()
513 test_result["throughput"]["value"] = \
514 int(rate_value.split('.')[0])
515 test_result["throughput"]["unit"] = rate_unit
516 test_result["latency"] = \
517 self._get_latency(test.message, test_type)
518 if test_type == "PDR":
519 test_result["lossTolerance"] = str(re.search(
520 self.REGEX_TOLERANCE, test.message).group(1))
522 elif test_type in ("TCP", ):
523 groups = re.search(self.REGEX_TCP, test.message)
524 test_result["result"] = dict()
525 test_result["result"]["value"] = int(groups.group(2))
526 test_result["result"]["unit"] = groups.group(1)
528 elif test_type in ("MRR", "BMRR"):
529 test_result["result"] = dict()
530 groups = re.search(self.REGEX_BMRR, test.message)
531 if groups is not None:
532 items_str = groups.group(1)
533 items_float = [float(item.strip()) for item
534 in items_str.split(",")]
535 test_result["result"]["receive-rate"] = \
536 AvgStdevMetadataFactory.from_data(items_float)
538 groups = re.search(self.REGEX_MRR, test.message)
539 test_result["result"]["receive-rate"] = \
540 AvgStdevMetadataFactory.from_data([
541 float(groups.group(3)) / float(groups.group(1)), ])
543 self._test_ID = test.longname.lower()
544 self._data["tests"][self._test_ID] = test_result
546 def end_test(self, test):
547 """Called when test ends.
549 :param test: Test to process.
555 def visit_keyword(self, keyword):
556 """Implements traversing through the keyword and its child keywords.
558 :param keyword: Keyword to process.
559 :type keyword: Keyword
562 if self.start_keyword(keyword) is not False:
563 self.end_keyword(keyword)
565 def start_keyword(self, keyword):
566 """Called when keyword starts. Default implementation does nothing.
568 :param keyword: Keyword to process.
569 :type keyword: Keyword
573 if keyword.type == "setup":
574 self.visit_setup_kw(keyword)
575 elif keyword.type == "teardown":
576 self._lookup_kw_nr = 0
577 self.visit_teardown_kw(keyword)
579 self._lookup_kw_nr = 0
580 self.visit_test_kw(keyword)
581 except AttributeError:
584 def end_keyword(self, keyword):
585 """Called when keyword ends. Default implementation does nothing.
587 :param keyword: Keyword to process.
588 :type keyword: Keyword
593 def visit_test_kw(self, test_kw):
594 """Implements traversing through the test keyword and its child
597 :param test_kw: Keyword to process.
598 :type test_kw: Keyword
601 for keyword in test_kw.keywords:
602 if self.start_test_kw(keyword) is not False:
603 self.visit_test_kw(keyword)
604 self.end_test_kw(keyword)
606 def start_test_kw(self, test_kw):
607 """Called when test keyword starts. Default implementation does
610 :param test_kw: Keyword to process.
611 :type test_kw: Keyword
614 if test_kw.name.count("Show Runtime Counters On All Duts"):
615 self._lookup_kw_nr += 1
616 self._show_run_lookup_nr = 0
617 self._msg_type = "test-show-runtime"
618 elif test_kw.name.count("Start The L2fwd Test") and not self._version:
619 self._msg_type = "dpdk-version"
622 test_kw.messages.visit(self)
624 def end_test_kw(self, test_kw):
625 """Called when keyword ends. Default implementation does nothing.
627 :param test_kw: Keyword to process.
628 :type test_kw: Keyword
633 def visit_setup_kw(self, setup_kw):
634 """Implements traversing through the teardown keyword and its child
637 :param setup_kw: Keyword to process.
638 :type setup_kw: Keyword
641 for keyword in setup_kw.keywords:
642 if self.start_setup_kw(keyword) is not False:
643 self.visit_setup_kw(keyword)
644 self.end_setup_kw(keyword)
646 def start_setup_kw(self, setup_kw):
647 """Called when teardown keyword starts. Default implementation does
650 :param setup_kw: Keyword to process.
651 :type setup_kw: Keyword
654 if setup_kw.name.count("Show Vpp Version On All Duts") \
655 and not self._version:
656 self._msg_type = "vpp-version"
658 elif setup_kw.name.count("Setup performance global Variables") \
659 and not self._timestamp:
660 self._msg_type = "timestamp"
663 setup_kw.messages.visit(self)
665 def end_setup_kw(self, setup_kw):
666 """Called when keyword ends. Default implementation does nothing.
668 :param setup_kw: Keyword to process.
669 :type setup_kw: Keyword
674 def visit_teardown_kw(self, teardown_kw):
675 """Implements traversing through the teardown keyword and its child
678 :param teardown_kw: Keyword to process.
679 :type teardown_kw: Keyword
682 for keyword in teardown_kw.keywords:
683 if self.start_teardown_kw(keyword) is not False:
684 self.visit_teardown_kw(keyword)
685 self.end_teardown_kw(keyword)
687 def start_teardown_kw(self, teardown_kw):
688 """Called when teardown keyword starts. Default implementation does
691 :param teardown_kw: Keyword to process.
692 :type teardown_kw: Keyword
696 if teardown_kw.name.count("Show Vat History On All Duts"):
697 self._vat_history_lookup_nr = 0
698 self._msg_type = "teardown-vat-history"
699 teardown_kw.messages.visit(self)
701 def end_teardown_kw(self, teardown_kw):
702 """Called when keyword ends. Default implementation does nothing.
704 :param teardown_kw: Keyword to process.
705 :type teardown_kw: Keyword
710 def visit_message(self, msg):
711 """Implements visiting the message.
713 :param msg: Message to process.
717 if self.start_message(msg) is not False:
718 self.end_message(msg)
720 def start_message(self, msg):
721 """Called when message starts. Get required information from messages:
724 :param msg: Message to process.
730 self.parse_msg[self._msg_type](msg)
732 def end_message(self, msg):
733 """Called when message ends. Default implementation does nothing.
735 :param msg: Message to process.
742 class InputData(object):
745 The data is extracted from output.xml files generated by Jenkins jobs and
746 stored in pandas' DataFrames.
752 (as described in ExecutionChecker documentation)
754 (as described in ExecutionChecker documentation)
756 (as described in ExecutionChecker documentation)
759 def __init__(self, spec):
762 :param spec: Specification.
763 :type spec: Specification
770 self._input_data = pd.Series()
774 """Getter - Input data.
777 :rtype: pandas.Series
779 return self._input_data
781 def metadata(self, job, build):
784 :param job: Job which metadata we want.
785 :param build: Build which metadata we want.
789 :rtype: pandas.Series
792 return self.data[job][build]["metadata"]
794 def suites(self, job, build):
797 :param job: Job which suites we want.
798 :param build: Build which suites we want.
802 :rtype: pandas.Series
805 return self.data[job][str(build)]["suites"]
807 def tests(self, job, build):
810 :param job: Job which tests we want.
811 :param build: Build which tests we want.
815 :rtype: pandas.Series
818 return self.data[job][build]["tests"]
821 def _parse_tests(job, build, log):
822 """Process data from robot output.xml file and return JSON structured
825 :param job: The name of job which build output data will be processed.
826 :param build: The build which output data will be processed.
827 :param log: List of log messages.
830 :type log: list of tuples (severity, msg)
831 :returns: JSON data structure.
840 with open(build["file-name"], 'r') as data_file:
842 result = ExecutionResult(data_file)
843 except errors.DataError as err:
844 log.append(("ERROR", "Error occurred while parsing output.xml: "
847 checker = ExecutionChecker(metadata)
848 result.visit(checker)
852 def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
853 """Download and parse the input data file.
855 :param pid: PID of the process executing this method.
856 :param data_queue: Shared memory between processes. Queue which keeps
857 the result data. This data is then read by the main process and used
858 in further processing.
859 :param job: Name of the Jenkins job which generated the processed input
861 :param build: Information about the Jenkins build which generated the
862 processed input file.
863 :param repeat: Repeat the download specified number of times if not
866 :type data_queue: multiprocessing.Manager().Queue()
874 logging.info(" Processing the job/build: {0}: {1}".
875 format(job, build["build"]))
877 logs.append(("INFO", " Processing the job/build: {0}: {1}".
878 format(job, build["build"])))
885 success = download_and_unzip_data_file(self._cfg, job, build, pid,
891 logs.append(("ERROR", "It is not possible to download the input "
892 "data file from the job '{job}', build "
893 "'{build}', or it is damaged. Skipped.".
894 format(job=job, build=build["build"])))
896 logs.append(("INFO", " Processing data from the build '{0}' ...".
897 format(build["build"])))
898 data = InputData._parse_tests(job, build, logs)
900 logs.append(("ERROR", "Input data file from the job '{job}', "
901 "build '{build}' is damaged. Skipped.".
902 format(job=job, build=build["build"])))
907 remove(build["file-name"])
908 except OSError as err:
909 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
910 format(build["file-name"], err)))
911 logs.append(("INFO", " Done."))
920 data_queue.put(result)
922 def download_and_parse_data(self, repeat=1):
923 """Download the input data files, parse input data from input files and
924 store in pandas' Series.
926 :param repeat: Repeat the download specified number of times if not
931 logging.info("Downloading and parsing input files ...")
933 work_queue = multiprocessing.JoinableQueue()
934 manager = multiprocessing.Manager()
935 data_queue = manager.Queue()
936 cpus = multiprocessing.cpu_count()
939 for cpu in range(cpus):
940 worker = Worker(work_queue,
942 self._download_and_parse_build)
945 workers.append(worker)
946 os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
947 format(cpu, worker.pid))
949 for job, builds in self._cfg.builds.items():
951 work_queue.put((job, build, repeat))
955 logging.info("Done.")
957 while not data_queue.empty():
958 result = data_queue.get()
961 build_nr = result["build"]["build"]
964 data = result["data"]
965 build_data = pd.Series({
966 "metadata": pd.Series(data["metadata"].values(),
967 index=data["metadata"].keys()),
968 "suites": pd.Series(data["suites"].values(),
969 index=data["suites"].keys()),
970 "tests": pd.Series(data["tests"].values(),
971 index=data["tests"].keys())})
973 if self._input_data.get(job, None) is None:
974 self._input_data[job] = pd.Series()
975 self._input_data[job][str(build_nr)] = build_data
977 self._cfg.set_input_file_name(job, build_nr,
978 result["build"]["file-name"])
980 self._cfg.set_input_state(job, build_nr, result["state"])
982 for item in result["logs"]:
983 if item[0] == "INFO":
984 logging.info(item[1])
985 elif item[0] == "ERROR":
986 logging.error(item[1])
987 elif item[0] == "DEBUG":
988 logging.debug(item[1])
989 elif item[0] == "CRITICAL":
990 logging.critical(item[1])
991 elif item[0] == "WARNING":
992 logging.warning(item[1])
996 # Terminate all workers
997 for worker in workers:
1001 logging.info("Done.")
1004 def _end_of_tag(tag_filter, start=0, closer="'"):
1005 """Return the index of character in the string which is the end of tag.
1007 :param tag_filter: The string where the end of tag is being searched.
1008 :param start: The index where the searching is stated.
1009 :param closer: The character which is the tag closer.
1010 :type tag_filter: str
1013 :returns: The index of the tag closer.
1018 idx_opener = tag_filter.index(closer, start)
1019 return tag_filter.index(closer, idx_opener + 1)
1024 def _condition(tag_filter):
1025 """Create a conditional statement from the given tag filter.
1027 :param tag_filter: Filter based on tags from the element specification.
1028 :type tag_filter: str
1029 :returns: Conditional statement which can be evaluated.
1035 index = InputData._end_of_tag(tag_filter, index)
1039 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1041 def filter_data(self, element, params=None, data_set="tests",
1042 continue_on_error=False):
1043 """Filter required data from the given jobs and builds.
1045 The output data structure is:
1049 - test (or suite) 1 ID:
1055 - test (or suite) n ID:
1062 :param element: Element which will use the filtered data.
1063 :param params: Parameters which will be included in the output. If None,
1064 all parameters are included.
1065 :param data_set: The set of data to be filtered: tests, suites,
1067 :param continue_on_error: Continue if there is error while reading the
1068 data. The Item will be empty then
1069 :type element: pandas.Series
1072 :type continue_on_error: bool
1073 :returns: Filtered data.
1074 :rtype pandas.Series
1078 if element["filter"] in ("all", "template"):
1081 cond = InputData._condition(element["filter"])
1082 logging.debug(" Filter: {0}".format(cond))
1084 logging.error(" No filter defined.")
1088 params = element.get("parameters", None)
1092 for job, builds in element["data"].items():
1093 data[job] = pd.Series()
1094 for build in builds:
1095 data[job][str(build)] = pd.Series()
1097 data_iter = self.data[job][str(build)][data_set].\
1100 if continue_on_error:
1104 for test_ID, test_data in data_iter:
1105 if eval(cond, {"tags": test_data.get("tags", "")}):
1106 data[job][str(build)][test_ID] = pd.Series()
1108 for param, val in test_data.items():
1109 data[job][str(build)][test_ID][param] = val
1111 for param in params:
1113 data[job][str(build)][test_ID][param] =\
1116 data[job][str(build)][test_ID][param] =\
1120 except (KeyError, IndexError, ValueError) as err:
1121 logging.error(" Missing mandatory parameter in the element "
1122 "specification: {0}".format(err))
1124 except AttributeError:
1127 logging.error(" The filter '{0}' is not correct. Check if all "
1128 "tags are enclosed by apostrophes.".format(cond))
1132 def merge_data(data):
1133 """Merge data from more jobs and builds to a simple data structure.
1135 The output data structure is:
1137 - test (suite) 1 ID:
1143 - test (suite) n ID:
1146 :param data: Data to merge.
1147 :type data: pandas.Series
1148 :returns: Merged data.
1149 :rtype: pandas.Series
1152 logging.info(" Merging data ...")
1154 merged_data = pd.Series()
1155 for _, builds in data.iteritems():
1156 for _, item in builds.iteritems():
1157 for ID, item_data in item.iteritems():
1158 merged_data[ID] = item_data