1 # Copyright (c) 2017 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from string import replace
24 from math import isnan
26 from xml.etree import ElementTree as ET
28 from errors import PresentationError
29 from utils import mean, stdev, relative_change, remove_outliers, split_outliers
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    """
    logging.info("Generating the tables ...")
    for table in spec.tables:
        # Dispatch by name: the spec names the generator function to run.
        # NOTE(review): eval() on a spec-supplied string executes arbitrary
        # code if the specification file is untrusted -- confirm the spec is
        # a trusted, locally-authored input.
        # NOTE(review): an enclosing try/except (likely NameError) appears
        # elided in this listing; the logging call below is its error branch.
            eval(table["algorithm"])(table, data)
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite, named
    ``<output-file>_<suite_name><output-file-ext>``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Data filtered according to the table specification.
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    # NOTE(review): initialization of ``header`` (expected: header = list())
    # appears elided in this listing.
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification; only the first job/build pair from the spec is used.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    suites = input_data.suites(job, build)
    # NOTE(review): the guard around this error path (likely "if not suites:"
    # plus a return) appears elided in this listing.
    logging.error(" No data available. The table will not be generated.")

    for suite_longname, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                for column in table["columns"]:
                    # NOTE(review): several lines are elided here (row_lst
                    # init, try/except, the tail of the "if ... in
                    # ('vat-history', ...)" condition and the replace()/
                    # format() continuations), leaving these fragments
                    # syntactically incomplete in this listing.
                    col_data = str(data[job][build][test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                    col_data = replace(col_data, " |br| ", "",
                    col_data = " |prein| {0} |preout| ".\
                    row_lst.append('"{0}"'.format(col_data))
                    # KeyError fallback: column value missing for this test.
                    row_lst.append("No data")
        table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data from all builds is merged before the
    per-suite CSV files are written.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Filter, merge across builds, and sort the test data.
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    # NOTE(review): initialization of ``header`` appears elided in this
    # listing.
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        suite_name = suite["name"]
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                for column in table["columns"]:
                    # NOTE(review): row_lst init, try/except and the tails of
                    # the "vat-history" condition / replace() call are elided
                    # in this listing.
                    col_data = str(data[test][column["data"].
                                   split(" ")[1]]).replace('"', '""')
                    if column["data"].split(" ")[1] in ("vat-history",
                    col_data = replace(col_data, " |br| ", "",
                    # Trailing " |br| " (5 chars) is stripped before wrapping
                    # in |prein| / |preout| markers.
                    col_data = " |prein| {0} |preout| ".\
                        format(col_data[:-5])
                    row_lst.append('"{0}"'.format(col_data))
                    # KeyError fallback: column value missing for this test.
                    row_lst.append("No data")
        table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info(" Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    Fills a CSV template with measured/computed values, sorts rows by the
    relative change in the last column, and splits them into ndr/pdr
    top/low output files around a 10.0 threshold.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    def _write_line_to_file(file_handler, data):
        """Write a line to the .csv file.

        :param file_handler: File handler for the csv file. It must be open for
            writing.
        :param data: Item to be written to the file.
        :type file_handler: BinaryIO
        """
        # NOTE(review): ``line_lst`` initialization and the loop header
        # ("for item in data:") appear elided in this listing.
        if isinstance(item["data"], str):
            # Remove -?drdisc from the end
            if item["data"].endswith("drdisc"):
                item["data"] = item["data"][:-8]
            line_lst.append(item["data"])
        elif isinstance(item["data"], float):
            line_lst.append("{:.1f}".format(item["data"]))
        elif item["data"] is None:
        file_handler.write(",".join(line_lst) + "\n")

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template
    file_name = table.get("template", None)
    # NOTE(review): the surrounding "if file_name:" / "try:" structure
    # appears elided in this listing.
        tmpl = _read_csv_template(file_name)
    except PresentationError:
        logging.error(" The template '{0}' does not exist. Skipping the "
                      "table.".format(file_name))
        logging.error("The template is not defined. Skipping the table.")

    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification; each column spec is "cmd arg0 arg1 ...".
    for tmpl_item in tmpl:
        for column in table["columns"]:
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                # NOTE(review): the try/except around the float() conversion
                # is elided; the second assignment is its fallback branch.
                val = float(tmpl_item[int(args[0])])
                val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
            # NOTE(review): the "elif cmd == 'data':" branch header and the
            # job/operation extraction lines appear elided here.
            for build in data[job]:
                    data_lst.append(float(build[tmpl_item[0]]
                                          ["throughput"]["value"]))
                except (KeyError, TypeError):
                    # eval() resolves the aggregation named in the spec
                    # (e.g. "mean"); see the security note in
                    # generate_tables().
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[0]["data"]))
                    tbl_item.append({"data": None})
                # Unknown command: log and give up on this table.
                logging.error("Not supported command {0}. Skipping the table.".
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    # NOTE(review): the "file_names = [" opener is elided in this listing.
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])

    for file_name in file_names:
        logging.info(" Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            # NOTE(review): "for item in tbl_lst:" loop header is elided.
            if isinstance(item[-1]["data"], float):
                rel_change = round(item[-1]["data"], 1)
                rel_change = item[-1]["data"]
            # Route each row to the matching ndr/pdr, top/low file; 10.0 is
            # the relative-change threshold separating "top" from "low".
            if "ndr_top" in file_name \
                    and "ndr" in item[0]["data"] \
                    and rel_change >= 10.0:
                _write_line_to_file(file_handler, item)
            elif "pdr_top" in file_name \
                    and "pdr" in item[0]["data"] \
                    and rel_change >= 10.0:
                _write_line_to_file(file_handler, item)
            elif "ndr_low" in file_name \
                    and "ndr" in item[0]["data"] \
                    and rel_change < 10.0:
                _write_line_to_file(file_handler, item)
            elif "pdr_low" in file_name \
                    and "pdr" in item[0]["data"] \
                    and rel_change < 10.0:
                _write_line_to_file(file_handler, item)

    logging.info(" Done.")
325 def _read_csv_template(file_name):
326 """Read the template from a .csv file.
328 :param file_name: Name / full path / relative path of the file to read.
330 :returns: Data from the template as list (lines) of lists (items on line).
332 :raises: PresentationError if it is not possible to read the file.
336 with open(file_name, 'r') as csv_file:
338 for line in csv_file:
339 tmpl_data.append(line[:-1].split(","))
341 except IOError as err:
342 raise PresentationError(str(err), level="ERROR")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Compares reference vs. compare throughput per test, writes full CSV
    tables per NDR/PDR and per core count, pretty-printed .txt versions,
    and top/bottom selections for the 1t1c tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data (errors tolerated so partial data still produces
    # a table).
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    # NOTE(review): the enclosing "try:" and the closing bracket / tail of
    # the header list appear elided in this listing.
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # NOTE(review): tbl_dict init and the inner "for build in builds:" loop
    # header appear elided in this and the following loops.
    for job, builds in table["reference"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: first token of the parent suite plus the
                    # test name (tail of the split elided in this listing).
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["throughput"]["value"])
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["throughput"]["value"])
                    # On error, drop tests without compare data entirely.
                    tbl_dict.pop(tst_name, None)

    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            # Throughput converted to Mpps (divide by 1e6), 2 decimals.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        if item[1] is not None and item[3] is not None:
            item.append(int(relative_change(float(item[1]), float(item[3]))))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables: one file per NDR/PDR and per core count.
    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
                                               table["output-file-ext"]),
                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])

    for file_name in tbl_names:
        logging.info(" Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            # NOTE(review): "for test in tbl_lst:" loop header elided.
            if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
                    file_name.split("-")[-2] in test[0]):  # cores
                test[0] = "-".join(test[0].split("-")[:-1])
                file_handler.write(",".join([str(item) for item in test]) +

    # Generate txt tables (pretty-printed versions of the CSVs above):
    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])

    for i, txt_name in enumerate(tbl_names_txt):
        # NOTE(review): "txt_table = None" reset appears elided here.
        logging.info(" Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First row becomes the PrettyTable header; the rest are rows.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                    txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))

    # Selected tests in csv:
    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:
    # NOTE(review): the "lines = list()" / readline loop appears elided.

    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            # Stop after the configured number of tests.
            if i == table["nr-of-tests-shown"]:

    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        # Iterate from the last data line backwards (skip the header at 0).
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:

    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
                                               table["output-file-ext"])
    with open(input_file, "r") as in_file:

    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
                                               table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[1:]):
            if i == table["nr-of-tests-shown"]:

    output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
                                                  table["output-file-ext"])
    logging.info(" Writing file: '{0}'".format(output_file))
    with open(output_file, "w") as out_file:
        out_file.write(header_str)
        for i, line in enumerate(lines[-1:0:-1]):
            if i == table["nr-of-tests-shown"]:
def table_performance_comparison_mrr(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison_mrr
    specified in the specification file.

    MRR variant of table_performance_comparison: reads
    tst_data["result"]["throughput"] instead of the NDR/PDR throughput
    value, and produces per-core-count files only (no ndr/pdr split).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data (errors tolerated).
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    # NOTE(review): "try:" and the tail of the header list appear elided.
        header = ["Test case",
                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".

    # Prepare data to the table:
    # NOTE(review): tbl_dict init and inner "for build in builds:" loop
    # headers appear elided in the two loops below.
    for job, builds in table["reference"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    tbl_dict[tst_name]["ref-data"].\
                        append(tst_data["result"]["throughput"])
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                    tbl_dict[tst_name]["cmp-data"].\
                        append(tst_data["result"]["throughput"])
                    # On error, drop tests without compare data entirely.
                    tbl_dict.pop(tst_name, None)

    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if tbl_dict[tst_name]["ref-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            # Throughput converted to Mpps, 2 decimals.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        if tbl_dict[tst_name]["cmp-data"]:
            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                     outlier_const=table["outlier-const"])
            # TODO: Specify window size.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
            item.extend([None, None])
        # Guard item[1] != 0 avoids division by zero in relative_change().
        if item[1] is not None and item[3] is not None and item[1] != 0:
            item.append(int(relative_change(float(item[1]), float(item[3]))))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate tables: one file per core count.
    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-2t2c-full{1}".format(table["output-file"],
                                           table["output-file-ext"]),
                 "{0}-4t4c-full{1}".format(table["output-file"],
                                           table["output-file-ext"])

    for file_name in tbl_names:
        logging.info(" Writing file: '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(header_str)
            # NOTE(review): "for test in tbl_lst:" loop header elided.
            if file_name.split("-")[-2] in test[0]:  # cores
                test[0] = "-".join(test[0].split("-")[:-1])
                file_handler.write(",".join([str(item) for item in test]) +

    # Generate txt tables (pretty-printed versions of the CSVs above):
    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
                     "{0}-2t2c-full.txt".format(table["output-file"]),
                     "{0}-4t4c-full.txt".format(table["output-file"])

    for i, txt_name in enumerate(tbl_names_txt):
        # NOTE(review): "txt_table = None" reset appears elided here.
        logging.info(" Writing file: '{0}'".format(txt_name))
        with open(tbl_names[i], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # First row becomes the PrettyTable header; the rest are rows.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                    txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
        with open(txt_name, "w") as txt_file:
            txt_file.write(str(txt_table))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Builds the trending dashboard: per test, a rolling median/stdev over a
    configured window classifies each sample as outlier / regression /
    progression / normal, and short/long-term relative changes of the
    median are reported.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data (errors tolerated).
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    # NOTE(review): some header items and the closing bracket are elided.
    header = ["Test Case",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # NOTE(review): tbl_dict init and "for build in builds:" header elided.
    for job, builds in table["data"].items():
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                    tbl_dict[tst_name] = {"name": name,
                    # Keyed by build number so pd.Series keeps build order.
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["throughput"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for tst_name in tbl_dict.keys():
        # At least 3 samples are required for a meaningful trend.
        if len(tbl_dict[tst_name]["data"]) > 2:

            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            last_key = pd_data.keys()[-1]
            win_size = min(pd_data.size, table["window"])
            # key_14: build at the start of the short-term window.
            key_14 = pd_data.keys()[-(pd_data.size - win_size)]
            long_win_size = min(pd_data.size, table["long-trend-window"])

            # NOTE(review): tail of this split_outliers() call is elided.
            data_t, _ = split_outliers(pd_data, outlier_const=1.5,

            median_t = data_t.rolling(window=win_size, min_periods=2).median()
            stdev_t = data_t.rolling(window=win_size, min_periods=2).std()
            median_idx = pd_data.size - long_win_size
            # Long-term reference: max rolling median over the long window
            # (tail of this comprehension is elided in this listing).
            max_median = max([x for x in median_t.values[median_idx:]

            last_median_t = median_t[last_key]
            median_t_14 = median_t[key_14]

            name = tbl_dict[tst_name]["name"]

            # Classification list:
            classification_lst = list()
            for build_nr, value in pd_data.iteritems():
                # NOTE(review): the last condition of this chain is elided.
                if isnan(data_t[build_nr]) \
                        or isnan(median_t[build_nr]) \
                        or isnan(stdev_t[build_nr]) \
                    classification_lst.append("outlier")
                elif value < (median_t[build_nr] - 3 * stdev_t[build_nr]):
                    classification_lst.append("regression")
                elif value > (median_t[build_nr] + 3 * stdev_t[build_nr]):
                    classification_lst.append("progression")
                    classification_lst.append("normal")

            # Short-term change: last median vs. median at window start.
            if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0:
                rel_change_last = nan
                rel_change_last = round(
                    (last_median_t - median_t_14) / median_t_14, 2)

            # Long-term change: last median vs. long-window maximum median.
            if isnan(max_median) or isnan(last_median_t) or max_median == 0:
                rel_change_long = nan
                rel_change_long = round(
                    (last_median_t - max_median) / max_median, 2)

            # Row layout: [0]=name, [1]=last median (Mpps), [2]=short-term
            # change, [3]=long-term change, [4]=regressions, [5]=progressions,
            # [6]=outliers (counts taken after the initial window).
            tbl_lst.append([name,
                            '-' if isnan(last_median_t) else
                            round(last_median_t / 1000000, 2),
                            '-' if isnan(rel_change_last) else rel_change_last,
                            '-' if isnan(rel_change_long) else rel_change_long,
                            classification_lst[win_size:].count("regression"),
                            classification_lst[win_size:].count("progression"),
                            classification_lst[win_size:].count("outlier")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable multi-key sort: regressions desc, then progressions desc,
    # then outliers desc.
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
            for nro in range(table["window"], -1, -1):
                # NOTE(review): outlier counts live at item[6] (see the row
                # layout above); filtering by item[5] here re-tests the
                # progression count and looks like an index bug -- confirm
                # this should be item[6].
                tbl_out = [item for item in tbl_pro if item[5] == nro]
                tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    # NOTE(review): "txt_table = None" reset appears elided here.
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                txt_table = prettytable.PrettyTable(row)
                txt_table.add_row(row)
    txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the dashboard CSV and renders it as an HTML table (embedded in a
    reStructuredText ``.. raw:: html`` block), with each test-name cell
    linked to the matching trending-graph page.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # NOTE(review): the enclosing try: / except KeyError structure around
    # this read appears elided in this listing.
    with open(table["input-file"], 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Table:
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Table header:
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        # First column left-aligned, the rest centered.
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Rows:
    for r_idx, row in enumerate(csv_lst[1:]):
        # Zebra striping: alternate row background colors.
        background = "#D4E4F7" if r_idx % 2 else "white"
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Columns:
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # Map the test name to its trending page; the leading branch(es)
            # of this if/elif chain (e.g. the memif case) are elided in this
            # listing.
            file_name = "container_memif.html"
        elif "vhost" in item:
            if "l2xcbase" in item or "l2bdbasemaclrn" in item:
                file_name = "vm_vhost_l2.html"
            elif "ip4base" in item:
                file_name = "vm_vhost_ip4.html"
        elif "ipsec" in item:
            file_name = "ipsec.html"
        elif "ethip4lispip" in item or "ethip4vxlan" in item:
            file_name = "ip4_tunnels.html"
        elif "ip4base" in item or "ip4scale" in item:
            file_name = "ip4.html"
            if "iacl" in item or "snat" in item or "cop" in item:
                feature = "-features"
        elif "ip6base" in item or "ip6scale" in item:
            file_name = "ip6.html"
        elif "l2xcbase" in item or "l2xcscale" in item \
                or "l2bdbasemaclrn" in item or "l2bdscale" in item \
                or "l2dbbasemaclrn" in item or "l2dbscale" in item:
            file_name = "l2.html"
            feature = "-features"
        # NOTE(review): NIC/frame-size anchor selection bodies are elided.
        elif "xl710" in item:
        elif "9000b" in item:
        url = url + file_name + anchor + feature
        ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # NOTE(review): the enclosing try: / except KeyError around this write
    # appears elided; the warning below is its error branch.
    with open(table["output-file"], 'w') as html_file:
        logging.info(" Writing file: '{0}'".
                     format(table["output-file"]))
        # Wrap the generated HTML in an rST raw-html directive.
        html_file.write(".. raw:: html\n\n\t")
        html_file.write(ET.tostring(dashboard))
        html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")