1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from string import replace
24 from collections import OrderedDict
25 from numpy import nan, isnan
26 from xml.etree import ElementTree as ET
28 from errors import PresentationError
29 from utils import mean, stdev, relative_change, classify_anomalies
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.
    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    logging.info("Generating the tables ...")
    # Each table entry names its generator function as a string in
    # table["algorithm"]; the name is resolved with eval() and called with
    # (table, data).  NOTE(review): eval-based dispatch assumes the
    # specification file is trusted input; a dict/getattr dispatch would be
    # safer — confirm the spec source before changing.
    for table in spec.tables:
        eval(table["algorithm"])(table, data)
        # A misspelled/unknown algorithm name surfaces as NameError from eval;
        # log it and continue with the remaining tables.
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    # Prepare the header of the tables
    # Each header cell is CSV-quoted; embedded quotes are doubled per RFC 4180.
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
    # Generate the data for the table according to the model in the table
    # Only the first job and its first build from table["data"] are used here.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    suites = input_data.suites(job, build)
        logging.error("  No data available. The table will not be generated.")
    # One output file is produced per suite; a test row belongs to a suite
    # when its "parent" string occurs in the suite name.
    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                for column in table["columns"]:
                        # column["data"] is "<cmd> <key>"; the key selects the
                        # field of the test record to render in this column.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        # History-style fields drop the " |br| " line-break
                        # markers and are wrapped in |prein|/|preout| so the
                        # rST renderer treats them as preformatted text.
                        if column["data"].split(" ")[1] in ("vat-history",
                            col_data = replace(col_data, " |br| ", "",
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                        row_lst.append("No data")
                table_lst.append(row_lst)
    # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")
    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    # Unlike table_details, the data from all jobs/builds is merged into a
    # single flat test-name -> record mapping before rendering.
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)
    # Prepare the header of the tables
    # Header cells are CSV-quoted; embedded quotes are doubled.
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
    # One output file per suite; a test belongs to a suite when its "parent"
    # string occurs in the suite name.
    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                for column in table["columns"]:
                        # column["data"] is "<cmd> <key>"; the key selects the
                        # test-record field shown in this column.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        # History-style fields are stripped of " |br| " markers
                        # and wrapped as preformatted |prein|/|preout| text.
                        if column["data"].split(" ")[1] in ("vat-history",
                            col_data = replace(col_data, " |br| ", "",
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                        row_lst.append("No data")
                table_lst.append(row_lst)
    # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")
    logging.info("  Done.")
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    # Local helper: serialize one table row (a list of {"data": value} dicts)
    # as a CSV line, normalizing each cell by its Python type.
    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.
        :param file_handler: File handler for the csv file. It must be open for
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
            if isinstance(item["data"], str):
                # Remove -?drdisc from the end
                if item["data"].endswith("drdisc"):
                    item["data"] = item["data"][:-8]
                line_lst.append(item["data"])
            elif isinstance(item["data"], float):
                # Floats are rendered with one decimal place.
                line_lst.append("{:.1f}".format(item["data"]))
            elif item["data"] is None:
        file_handler.write(",".join(line_lst) + "\n")
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # The table is built on top of a .csv template; without it there is
    # nothing to fill in, so the table is skipped.
    file_name = table.get("template", None)
        tmpl = _read_csv_template(file_name)
        except PresentationError:
            logging.error("  The template '{0}' does not exist. Skipping the "
                          "table.".format(file_name))
        logging.error("The template is not defined. Skipping the table.")
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    # Prepare the header of the tables
    for column in table["columns"]:
        header.append(column["title"])
    # Generate the data for the table according to the model in the table
    # Each column spec is "<cmd> <args...>": "template" copies a template
    # cell, "operation" applies a named function to two earlier cells.
    for tmpl_item in tmpl:
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                    # Template cells are numeric when possible, else raw text.
                    val = float(tmpl_item[int(args[0])])
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
                    # Collect throughput values for this test across builds;
                    # builds without the test are silently skipped.
                    for build in data[job]:
                            data_lst.append(float(build[tmpl_item[0]]
                                                  ["throughput"]["value"]))
                        except (KeyError, TypeError):
                    # NOTE(review): eval() resolves the aggregate function
                    # name (e.g. "mean") from the spec — trusted input only.
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                    # Apply the named binary operation to two earlier columns.
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
                logging.error("Not supported command {0}. Skipping the table.".
        tbl_lst.append(tbl_item)
    # Sort the table according to the relative change
    # The relative change is assumed to be the last column of each row.
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)
    # Create the tables and write them to the files
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])
    # Rows are partitioned by NDR/PDR (matched in the test name) and by the
    # 10% relative-change threshold: "top" >= 10.0, "low" < 10.0.
    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
                if isinstance(item[-1]["data"], float):
                    rel_change = round(item[-1]["data"], 1)
                    rel_change = item[-1]["data"]
                if "ndr_top" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_top" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change >= 10.0:
                    _write_line_to_file(file_handler, item)
                elif "ndr_low" in file_name \
                        and "ndr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
                elif "pdr_low" in file_name \
                        and "pdr" in item[0]["data"] \
                        and rel_change < 10.0:
                    _write_line_to_file(file_handler, item)
    logging.info("  Done.")
def _read_csv_template(file_name):
    """Read the template from a .csv file.
    :param file_name: Name / full path / relative path of the file to read.
    :returns: Data from the template as list (lines) of lists (items on line).
    :raises: PresentationError if it is not possible to read the file.
    with open(file_name, 'r') as csv_file:
        for line in csv_file:
            # line[:-1] drops the trailing newline; fields are split on ','.
            # NOTE(review): a naive split mis-handles quoted commas — the
            # templates presumably contain none; csv.reader would be safer.
            tmpl_data.append(line[:-1].split(","))
    # Missing/unreadable template is re-raised as the project's own error type.
    except IOError as err:
        raise PresentationError(str(err), level="ERROR")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    # Prepare the header of the tables
    # Optional "history" columns precede the reference/compare pairs; each
    # data set contributes a Throughput column and a Stdev column (in Mpps).
        header = ["Test case", ]
        history = table.get("history", None)
                    ["{0} Throughput [Mpps]".format(item["title"]),
                     "{0} Stdev [Mpps]".format(item["title"])])
            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
    # Prepare data to the table:
    # Pass 1: collect reference-set throughput samples per test; the row name
    # is "<first parent token>-<joined test-name tokens>".
    for job, builds in table["reference"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                    pass  # No data in output.xml for this test
    # Pass 2: collect compare-set samples; tests absent from the reference
    # dictionary are dropped from the comparison.
    for job, builds in table["compare"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                    tbl_dict.pop(tst_name, None)
    # Pass 3 (optional): collect per-title history samples keyed by
    # item["title"], preserving insertion order with an OrderedDict.
            for job, builds in item["data"].items():
                    for tst_name, tst_data in data[job][str(build)].iteritems():
                        if tbl_dict.get(tst_name, None) is None:
                        if tbl_dict[tst_name].get("history", None) is None:
                            tbl_dict[tst_name]["history"] = OrderedDict()
                        if tbl_dict[tst_name]["history"].get(item["title"],
                            tbl_dict[tst_name]["history"][item["title"]] = \
                            tbl_dict[tst_name]["history"][item["title"]].\
                                append(tst_data["throughput"]["value"])
                        except (TypeError, KeyError):
    # Build the rows: mean and stdev per data set scaled to Mpps (values are
    # in pps, hence / 1000000), None pair when a set has no samples.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                        item.extend([None, None])
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean; the final
        # column is the integer relative change between them.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
    # One "full" csv per NDR/PDR x 1t1c/2t2c/4t4c combination.
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
                # Rows are routed by matching the rate type and core count
                # tokens of the file name against the test name; the trailing
                # rate-type token is then stripped from the displayed name.
                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                        file_name.split("-")[-2] in test[0]):  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
    # Render each csv as an aligned ASCII table via prettytable.
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First csv row becomes the prettytable header.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                    txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
    # Selected tests in csv:
    # "top"/"bottom" extracts: first/last table["nr-of-tests-shown"] data rows
    # of the 1t1c full tables (rows are already sorted by relative change).
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # lines[-1:0:-1] walks data rows in reverse, excluding the header.
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:
    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info("      Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    # Prepare the header of the tables
    # MRR variant: fixed reference/compare column pairs, no history columns.
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
    # Prepare data to the table:
    # Pass 1: reference samples come from tst_data["result"]["throughput"]
    # (MRR result), not from a "throughput" record as in the NDR/PDR table.
    for job, builds in table["reference"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                    pass  # No data in output.xml for this test
    # Pass 2: compare samples; tests missing from the reference dict are
    # removed from the comparison entirely.
    for job, builds in table["compare"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                    tbl_dict.pop(tst_name, None)
    # Build rows: mean/stdev per set scaled to Mpps; None pair when empty.
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        # item[1]/item[3] are the reference/compare means; the last column is
        # the integer relative change between them.
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))
    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
    # One "full" csv per core count (no NDR/PDR split for MRR).
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])
    for file_name in tbl_names:
        logging.info("      Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
                # Route rows by the core-count token in the file name and
                # strip the trailing token from the displayed test name.
                if file_name.split("-")[-2] in test[0]:  # cores
                    test[0] = "-".join(test[0].split("-")[:-1])
                    file_handler.write(",".join([str(item) for item in test]) +
    # Render each csv as an aligned ASCII table via prettytable.
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])
    for i, txt_name in enumerate(tbl_names_txt):
        logging.info("      Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First csv row becomes the prettytable header.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                    txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    logging.info("    Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)
    # Prepare the header of the tables
    header = ["Test Case",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
    header_str = ",".join(header) + "\n"
    # Prepare data to the table:
    # Collect per-test MRR throughput keyed by build number (OrderedDict keeps
    # builds in chronological insertion order); ignore-listed tests skipped.
    for job, builds in table["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table["ignore-list"]:
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                                          "data": OrderedDict()}
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test
    for tst_name in tbl_dict.keys():
        # Fewer than 3 samples is not enough for trend analysis.
        if len(tbl_dict[tst_name]["data"]) < 3:
        data_t = pd.Series(tbl_dict[tst_name]["data"])
        last_key = data_t.keys()[-1]
        # Short-term window is the last table["window"] samples (or fewer);
        # key_14 is the first sample inside that window.
        win_size = min(data_t.size, table["window"])
        win_first_idx = data_t.size - win_size
        key_14 = data_t.keys()[win_first_idx]
        long_win_size = min(data_t.size, table["long-trend-window"])
        # Rolling median over the short-term window smooths outliers;
        # min_periods=2 lets the series start after two samples.
        median_t = data_t.rolling(window=win_size, min_periods=2).median()
        median_first_idx = median_t.size - long_win_size
            [x for x in median_t.values[median_first_idx:-win_size]
        last_median_t = median_t[last_key]
        median_t_14 = median_t[key_14]
        # Short-term change: latest median vs median at the window start,
        # as a percentage; nan guards avoid division errors.
        if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_median_t - median_t_14) / median_t_14) * 100, 2)
        # Long-term change: latest median vs the long-window maximum median.
        if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_median_t - max_median) / max_median) * 100, 2)
        # Classification list:
        classification_lst, _ = classify_anomalies(data_t)
        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                # Row layout: name, last median [Mpps], short/long change,
                # then regression/progression/outlier counts in the window.
                [tbl_dict[tst_name]["name"],
                 '-' if isnan(last_median_t) else
                 round(last_median_t / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
                 classification_lst[win_first_idx:].count("regression"),
                 classification_lst[win_first_idx:].count("progression"),
                 classification_lst[win_first_idx:].count("outlier")])
    tbl_lst.sort(key=lambda rel: rel[0])
    # Bucket-sort rows by descending regression count, then progression
    # count, then outlier count; ties ordered by short-term change (col 2).
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                tbl_out = [item for item in tbl_pro if item[6] == nro]
                tbl_out.sort(key=lambda rel: rel[2])
                tbl_sorted.extend(tbl_out)
    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')
    # Render the csv just written as an aligned ASCII table.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
                txt_table.add_row(row)
    txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))
    # Load the dashboard csv produced by table_performance_trending_dashboard.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))
    # Build the HTML table in-memory with ElementTree.
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
    # Table header: first csv row, left-aligned first cell, rest centered.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
    # Rows: classification picks the color pair; alternating rows use the
    # lighter/darker shade (r_idx % 2).
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "outlier": ("#e6e6e6", "#cccccc"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
            color = "progression"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Map substrings of the test name to the trending page that the
            # first cell should link to; order matters ("vhost" before
            # "ip4base", etc.) because the first matching branch wins.
            if "lbdpdk" in item or "lbvpp" in item:
                file_name = "link_bonding.html"
            elif "testpmd" in item or "l3fwd" in item:
                file_name = "dpdk.html"
            elif "memif" in item:
                file_name = "container_memif.html"
                file_name = "srv6.html"
            elif "vhost" in item:
                if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                    file_name = "vm_vhost_l2.html"
                elif "ip4base" in item:
                    file_name = "vm_vhost_ip4.html"
            elif "ipsec" in item:
                file_name = "ipsec.html"
            elif "ethip4lispip" in item or "ethip4vxlan" in item:
                file_name = "ip4_tunnels.html"
            elif "ip4base" in item or "ip4scale" in item:
                file_name = "ip4.html"
                # Feature suffix selects the "-features" page anchor variant.
                if "iacl" in item or "snat" in item or "cop" in item:
                    feature = "-features"
            elif "ip6base" in item or "ip6scale" in item:
                file_name = "ip6.html"
            elif "l2xcbase" in item or "l2xcscale" in item \
                    or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                    or "l2dbbasemaclrn" in item or "l2dbscale" in item:
                file_name = "l2.html"
                feature = "-features"
            # NIC / frame-size tokens select the in-page anchor.
            elif "xl710" in item:
            elif "9000b" in item:
            url = url + file_name + anchor + feature
            ref = ET.SubElement(td, "a", attrib=dict(href=url))
    # Emit as an rST ".. raw:: html" block so Sphinx embeds it verbatim.
    with open(table["output-file"], 'w') as html_file:
        logging.info("    Writing file: '{0}'".format(table["output-file"]))
        html_file.write(".. raw:: html\n\n\t")
        html_file.write(ET.tostring(dashboard))
        html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")