1 # Copyright (c) 2017 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
import csv
import logging

from string import replace

from errors import PresentationError
from utils import mean, stdev, relative_change
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table item in the specification names the algorithm (a function in
    this module) that renders it; unknown algorithm names are logged and
    skipped so one bad table does not abort the whole run.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # NOTE: eval() dispatches on a string from the specification file.
            # The spec is operator-provided configuration, not untrusted
            # input; do not point this at externally supplied data.
            eval(table["algorithm"])(table, data)
        except NameError:
            logging.error("The algorithm '{0}' is not defined.".
                          format(table["algorithm"]))
    logging.info("Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One .csv file is written per suite; each row holds the columns requested
    in the table specification for every test whose parent matches the suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables; cell text is CSV-quoted, embedded
    # double quotes are escaped by doubling.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification. The table is built from a single job/build pair.
    # list(...) keeps this working on both Python 2 and Python 3.
    job = list(table["data"].keys())[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error("  No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.items():
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data doc"; the second word
                        # names the per-test field to show.
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Multi-line fields: drop the first line break
                            # marker and wrap in |prein|/|preout| directives.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data of all selected job/build pairs is first
    merged, so each test appears once with its merged results. One .csv file
    is written per suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data: merge all builds into one view, keyed by test name.
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; cell text is CSV-quoted, embedded
    # double quotes are escaped by doubling.
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.items():
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        # column["data"] is e.g. "data doc"; the second word
                        # names the per-test field to show.
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("vat-history",
                                                            "show-run"):
                            # Multi-line fields: drop the first line break
                            # marker and wrap in |prein|/|preout| directives.
                            col_data = col_data.replace(" |br| ", "", 1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                        table["output-file-ext"])
        logging.info("      Writing file: '{}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            for item in table_lst:
                file_handler.write(",".join(item) + "\n")

    logging.info("  Done.")
def table_performance_improvements(table, input_data):
    """Generate the table(s) with algorithm: table_performance_improvements
    specified in the specification file.

    NOTE(review): several source lines appear to be missing from this chunk
    (initialisations such as `header`/`tbl_lst`/`tbl_item`/`data_lst`, the
    `try:` openers matched by the `except` clauses below, `else:` branches,
    the `file_names` list literal, and the `for item in tbl_lst:` loop over
    rows). The code is reproduced as-is; confirm against the full file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    def _write_line_to_file(file_handler, data):
        """Write one table row to the open .csv file.

        :param file_handler: File handler for the csv file. It must be open for
            writing.
        :param data: Item (row) to be written to the file.
        :type file_handler: BinaryIO
        """

        # Format each cell: strings verbatim, floats with one decimal place;
        # the None branch body is not visible here (presumably an empty cell).
        # NOTE(review): the loop binding `item` over `data`, and the
        # `line_lst` initialisation, are missing from this chunk.
        if isinstance(item["data"], str):
            line_lst.append(item["data"])
        elif isinstance(item["data"], float):
            line_lst.append("{:.1f}".format(item["data"]))
        elif item["data"] is None:
        file_handler.write(",".join(line_lst) + "\n")

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the template; the whole table is skipped when it is absent.
    # NOTE(review): the `try:` matching the `except PresentationError:` below
    # is missing from this chunk, as are the `return` statements after the
    # error logs.
    file_name = table.get("template", None)
        tmpl = _read_csv_template(file_name)
    except PresentationError:
        logging.error("  The template '{0}' does not exist. Skipping the "
                      "table.".format(file_name))
        logging.error("The template is not defined. Skipping the table.")

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table["columns"]:
        header.append(column["title"])

    # Generate the data for the table according to the model in the table
    # specification: one output row per template row, one cell per column.
    for tmpl_item in tmpl:
        for column in table["columns"]:
            # column["data"] is "<command> <args...>".
            cmd = column["data"].split(" ")[0]
            args = column["data"].split(" ")[1:]
            if cmd == "template":
                # Copy the value from template column args[0]; numeric values
                # become floats (the fallback branch keeps the raw string).
                    val = float(tmpl_item[int(args[0])])
                    val = tmpl_item[int(args[0])]
                tbl_item.append({"data": val})
                # Aggregate the throughput of all builds of the job with the
                # operation named in the spec (dispatched via eval below),
                # scaled (divisor not visible in this chunk).
                for build in data[job]:
                        data_lst.append(float(build[tmpl_item[0]]
                                              ["throughput"]["value"]))
                    except (KeyError, TypeError):
                    tbl_item.append({"data": (eval(operation)(data_lst)) /
                    tbl_item.append({"data": None})
            elif cmd == "operation":
                # Apply the named operation (e.g. relative_change) to two
                # previously computed cells of this row.
                    nr1 = float(tbl_item[int(args[1])]["data"])
                    nr2 = float(tbl_item[int(args[2])]["data"])
                        tbl_item.append({"data": eval(operation)(nr1, nr2)})
                        tbl_item.append({"data": None})
                except (IndexError, ValueError, TypeError):
                    logging.error("No data for {0}".format(tbl_item[1]["data"]))
                    tbl_item.append({"data": None})
                logging.error("Not supported command {0}. Skipping the table.".
        tbl_lst.append(tbl_item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True)

    # Create the tables and write them to the files
    # NOTE(review): the `file_names = [` opener is missing from this chunk.
        "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]),
        "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"])

    for file_name in file_names:
        logging.info("    Writing the file '{0}'".format(file_name))
        with open(file_name, "w") as file_handler:
            file_handler.write(",".join(header) + "\n")
            # Split rows across four files: NDR vs PDR tests, and "top"
            # (relative change >= 10.0) vs "low" (< 10.0).
            # NOTE(review): the `for item in tbl_lst:` loop header and the
            # `else:` before the second rel_change assignment are missing.
            if isinstance(item[-1]["data"], float):
                rel_change = round(item[-1]["data"], 1)
                rel_change = item[-1]["data"]
            if "ndr_top" in file_name \
                    and "ndr" in item[1]["data"] \
                    and rel_change >= 10.0:
                _write_line_to_file(file_handler, item)
            elif "pdr_top" in file_name \
                    and "pdr" in item[1]["data"] \
                    and rel_change >= 10.0:
                _write_line_to_file(file_handler, item)
            elif "ndr_low" in file_name \
                    and "ndr" in item[1]["data"] \
                    and rel_change < 10.0:
                _write_line_to_file(file_handler, item)
            elif "pdr_low" in file_name \
                    and "pdr" in item[1]["data"] \
                    and rel_change < 10.0:
                _write_line_to_file(file_handler, item)

    logging.info("  Done.")
318 def _read_csv_template(file_name):
319 """Read the template from a .csv file.
321 :param file_name: Name / full path / relative path of the file to read.
323 :returns: Data from the template as list (lines) of lists (items on line).
325 :raises: PresentationError if it is not possible to read the file.
329 with open(file_name, 'r') as csv_file:
331 for line in csv_file:
332 tmpl_data.append(line[:-1].split(","))
334 except IOError as err:
335 raise PresentationError(str(err), level="ERROR")
338 def table_performance_comparison(table, input_data):
339 """Generate the table(s) with algorithm: table_performance_comparison
340 specified in the specification file.
342 :param table: Table to generate.
343 :param input_data: Data to process.
344 :type table: pandas.Series
345 :type input_data: InputData
349 data = input_data.filter_data(table)
351 # Prepare the header of the tables
353 header = ["Test case",
354 "{0} Throughput [Mpps]".format(table["reference"]["title"]),
355 "{0} stdev [Mpps]".format(table["reference"]["title"]),
356 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
357 "{0} stdev [Mpps]".format(table["compare"]["title"]),
359 header_str = ",".join(header) + "\n"
360 except (AttributeError, KeyError) as err:
361 logging.error("The model is invalid, missing parameter: {0}".
365 # Prepare data to the table:
367 for job, builds in table["reference"]["data"].items():
369 for tst_name, tst_data in data[job][str(build)].iteritems():
370 if tbl_dict.get(tst_name, None) is None:
371 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
372 "-".join(tst_data["name"].
374 tbl_dict[tst_name] = {"name": name,
378 tbl_dict[tst_name]["ref-data"].\
379 append(tst_data["throughput"]["value"])
380 except TypeError as err:
382 logging.warning(tst_data)
384 for job, builds in table["compare"]["data"].items():
386 for tst_name, tst_data in data[job][str(build)].iteritems():
387 tbl_dict[tst_name]["cmp-data"].\
388 append(tst_data["throughput"]["value"])
391 for tst_name in tbl_dict.keys():
392 item = [tbl_dict[tst_name]["name"], ]
393 if tbl_dict[tst_name]["ref-data"]:
394 item.append(round(mean(tbl_dict[tst_name]["ref-data"]) / 1000000,
396 item.append(round(stdev(tbl_dict[tst_name]["ref-data"]) / 1000000,
399 item.extend([None, None])
400 if tbl_dict[tst_name]["cmp-data"]:
401 item.append(round(mean(tbl_dict[tst_name]["cmp-data"]) / 1000000,
403 item.append(round(stdev(tbl_dict[tst_name]["cmp-data"]) / 1000000,
406 item.extend([None, None])
407 if item[1] is not None and item[3] is not None:
408 item.append(int(relative_change(float(item[1]), float(item[3]))))
412 # Sort the table according to the relative change
413 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
417 tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
418 table["output-file-ext"]),
419 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
420 table["output-file-ext"]),
421 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
422 table["output-file-ext"]),
423 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
424 table["output-file-ext"]),
425 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
426 table["output-file-ext"]),
427 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
428 table["output-file-ext"])
430 for file_name in tbl_names:
431 with open(file_name, "w") as file_handler:
432 file_handler.write(header_str)
434 if (file_name.split("-")[-3] in test[0] and # NDR vs PDR
435 file_name.split("-")[-2] in test[0]): # cores
436 test[0] = "-".join(test[0].split("-")[:-1])
437 file_handler.write(",".join([str(item) for item in test]) +
441 tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
442 "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
443 "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
444 "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
445 "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
446 "{0}-pdr-4t4c-full.txt".format(table["output-file"])
449 for i, txt_name in enumerate(tbl_names_txt):
451 with open(tbl_names[i], 'rb') as csv_file:
452 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
453 for row in csv_content:
454 if txt_table is None:
455 txt_table = prettytable.PrettyTable(row)
457 txt_table.add_row(row)
458 with open(txt_name, "w") as txt_file:
459 txt_file.write(str(txt_table))
461 # Selected tests in csv:
462 input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
463 table["output-file-ext"])
464 with open(input_file, "r") as in_file:
469 output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
470 table["output-file-ext"])
471 with open(output_file, "w") as out_file:
472 out_file.write(header_str)
473 for i, line in enumerate(lines[1:]):
474 if i == table["nr-of-tests-shown"]:
478 output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
479 table["output-file-ext"])
480 with open(output_file, "w") as out_file:
481 out_file.write(header_str)
482 for i, line in enumerate(lines[-1:0:-1]):
483 if i == table["nr-of-tests-shown"]:
487 input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
488 table["output-file-ext"])
489 with open(input_file, "r") as in_file:
494 output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
495 table["output-file-ext"])
496 with open(output_file, "w") as out_file:
497 out_file.write(header_str)
498 for i, line in enumerate(lines[1:]):
499 if i == table["nr-of-tests-shown"]:
503 output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
504 table["output-file-ext"])
505 with open(output_file, "w") as out_file:
506 out_file.write(header_str)
507 for i, line in enumerate(lines[-1:0:-1]):
508 if i == table["nr-of-tests-shown"]: