1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    # NOTE(review): reconstructed from a mangled listing (embedded line
    # numbers, flattened indentation, dropped lines) - verify against VCS.

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # Dispatch to the algorithm function named in the specification.
            # NOTE(review): eval() on a spec-provided string; the spec file
            # is trusted input here, but a dict of callables (or getattr on
            # this module) would be the safer dispatch mechanism.
            eval(table["algorithm"])(table, data)
        except NameError as err:
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a mangled listing; missing control
    # flow (try/except, return) restored from context - verify against VCS.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    # Generate the data for the table according to the model in the table
    # specification; only the first build of the first job is used here.
    job = table["data"].keys()[0]
    build = str(table["data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(" No data available. The table will not be generated.")
        return

    for suite_longname, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[job][build][test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            # Strip the first line-break marker and wrap the
                            # rest as preformatted rST text.
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append("No data")
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info(" Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a mangled listing; missing control
    # flow restored from the parallel table_details() - verify against VCS.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    suites = input_data.filter_data(table, data_set="suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table["columns"]:
        header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))

    for _, suite in suites.iteritems():
        # Generate data
        suite_name = suite["name"]
        table_lst = list()
        for test in data.keys():
            if data[test]["parent"] in suite_name:
                row_lst = list()
                for column in table["columns"]:
                    try:
                        col_data = str(data[test][column["data"].
                                       split(" ")[1]]).replace('"', '""')
                        # NOTE(review): replacement text restored from
                        # context; confirm exact wording/padding.
                        col_data = replace(col_data, "No Data",
                                           "Not Captured     ")
                        if column["data"].split(" ")[1] in ("conf-history",
                                                            "show-run"):
                            col_data = replace(col_data, " |br| ", "",
                                               maxreplace=1)
                            col_data = " |prein| {0} |preout| ".\
                                format(col_data[:-5])
                        row_lst.append('"{0}"'.format(col_data))
                    except KeyError:
                        row_lst.append('"Not captured"')
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
                                            table["output-file-ext"])
            logging.info(" Writing file: '{}'".format(file_name))
            with open(file_name, "w") as file_handler:
                file_handler.write(",".join(header) + "\n")
                for item in table_lst:
                    file_handler.write(",".join(item) + "\n")

    logging.info(" Done.")
def table_performance_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a mangled listing (embedded line
    # numbers, flattened indentation, dropped lines); missing lines were
    # restored by cross-checking the parallel *_nic variant - verify vs VCS.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name: strip test-type suffixes and
                # fold thread/core variants to a per-core key.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                          split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                except (KeyError, TypeError):
                    pass  # No data in output.xml for this test

    # Optional historical columns; only tests already present in tbl_dict
    # (i.e. present in reference data) are extended.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in \
                            data[job][str(build)].iteritems():
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].\
                                get(item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] =\
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][
                                    item["title"]].append(
                                        tst_data["result"]["receive-rate"].avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].append(
                                            tst_data["throughput"]["PDR"][
                                                "LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].append(
                                            tst_data["throughput"]["NDR"][
                                                "LOWER"])
                        except (TypeError, KeyError):
                            pass

    # Build the table rows; rows with incomplete columns are dropped by the
    # len(item) == len(header) check below.
    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_performance_comparison
    specified in the specification file.

    Same as table_performance_comparison, but data is filtered to a single
    NIC (per the "nic" key of reference/compare/history) and the NIC string
    is stripped from test names.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a mangled listing; missing lines were
    # restored by cross-checking table_performance_comparison - verify vs VCS.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        history = table.get("history", None)
        if history:
            for item in history:
                header.extend(
                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                     "{0} Stdev [Mpps]".format(item["title"])])
        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Keep only tests run on the reference NIC.
                if table["reference"]["nic"] not in tst_data["tags"]:
                    continue
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                # Drop the NIC substring so names are comparable across NICs.
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}".format("-".join(tst_data["name"].
                                                 split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["ref-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["ref-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["ref-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Keep only tests run on the compare NIC.
                if table["compare"]["nic"] not in tst_data["tags"]:
                    continue
                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                    replace("-ndrdisc", "").replace("-pdr", ""). \
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if "across topologies" in table["title"].lower():
                    tst_name_mod = tst_name_mod.replace("2n1l-", "")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "{0}".format("-".join(tst_data["name"].
                                                 split("-")[:-1]))
                    if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                        name = name.\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    # TODO: Re-work when NDRPDRDISC tests are not used
                    if table["include-tests"] == "MRR":
                        tbl_dict[tst_name_mod]["cmp-data"]. \
                            append(tst_data["result"]["receive-rate"].avg)
                    elif table["include-tests"] == "PDR":
                        if tst_data["type"] == "PDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["PDR"]["LOWER"])
                    elif table["include-tests"] == "NDR":
                        if tst_data["type"] == "NDR":
                            tbl_dict[tst_name_mod]["cmp-data"]. \
                                append(tst_data["throughput"]["value"])
                        elif tst_data["type"] == "NDRPDR":
                            tbl_dict[tst_name_mod]["cmp-data"].append(
                                tst_data["throughput"]["NDR"]["LOWER"])
                except (KeyError, TypeError):
                    pass  # No data in output.xml for this test

    # Optional historical columns.
    if history:
        for item in history:
            for job, builds in item["data"].items():
                for build in builds:
                    for tst_name, tst_data in \
                            data[job][str(build)].iteritems():
                        if item["nic"] not in tst_data["tags"]:
                            continue
                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
                            replace("-ndrdisc", "").replace("-pdr", ""). \
                            replace("-ndr", "").\
                            replace("1t1c", "1c").replace("2t1c", "1c").\
                            replace("2t2c", "2c").replace("4t2c", "2c").\
                            replace("4t4c", "4c").replace("8t4c", "4c")
                        tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                        if "across topologies" in table["title"].lower():
                            tst_name_mod = tst_name_mod.replace("2n1l-", "")
                        if tbl_dict.get(tst_name_mod, None) is None:
                            continue
                        if tbl_dict[tst_name_mod].get("history", None) is None:
                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
                        if tbl_dict[tst_name_mod]["history"].\
                                get(item["title"], None) is None:
                            tbl_dict[tst_name_mod]["history"][item["title"]] =\
                                list()
                        try:
                            # TODO: Re-work when NDRPDRDISC tests are not used
                            if table["include-tests"] == "MRR":
                                tbl_dict[tst_name_mod]["history"][
                                    item["title"]].append(
                                        tst_data["result"]["receive-rate"].avg)
                            elif table["include-tests"] == "PDR":
                                if tst_data["type"] == "PDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].append(
                                            tst_data["throughput"]["PDR"][
                                                "LOWER"])
                            elif table["include-tests"] == "NDR":
                                if tst_data["type"] == "NDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].\
                                        append(tst_data["throughput"]["value"])
                                elif tst_data["type"] == "NDRPDR":
                                    tbl_dict[tst_name_mod]["history"][
                                        item["title"]].append(
                                            tst_data["throughput"]["NDR"][
                                                "LOWER"])
                        except (TypeError, KeyError):
                            pass

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        if history:
            if tbl_dict[tst_name].get("history", None) is not None:
                for hist_data in tbl_dict[tst_name]["history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([None, None])
            else:
                item.extend([None, None])
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # dot1q tests changed methodology between the compared releases, so
        # a numeric delta would be misleading.
        if "dot1q" in tbl_dict[tst_name]["name"]:
            item.append("Changed methodology")
        elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a mangled listing; missing control
    # flow restored from context - verify against VCS history.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = ["Test case", ]

        if table["include-tests"] == "MRR":
            hdr_param = "Receive Rate"
        else:
            hdr_param = "Throughput"

        header.extend(
            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
             "Delta [%]"])
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                # Normalize the test name and strip the NIC substring so the
                # same test on both NICs maps onto one row.
                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
                    replace("-ndrdisc", "").replace("-pdr", "").\
                    replace("-ndr", "").\
                    replace("1t1c", "1c").replace("2t1c", "1c").\
                    replace("2t2c", "2c").replace("4t2c", "2c").\
                    replace("4t4c", "4c").replace("8t4c", "4c")
                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = "-".join(tst_data["name"].split("-")[:-1])
                    tbl_dict[tst_name_mod] = {"name": name,
                                              "ref-data": list(),
                                              "cmp-data": list()}
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"].avg
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None

                    if result:
                        if table["reference"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["ref-data"].append(result)
                        elif table["compare"]["nic"] in tst_data["tags"]:
                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug("No data for {0}".format(tst_name))
                    logging.debug(repr(err))
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_t = tbl_dict[tst_name]["ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name]["cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        # item[-4] is the reference mean, item[-2] the compare mean.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a mangled listing; missing control
    # flow restored from context - verify against VCS history.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
            "Delta [%]", "Stdev of delta [%]"]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error("The model is invalid, missing parameter: {0}".
                      format(err))
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_data["type"] == "SOAK":
                    tst_name_mod = tst_name.replace("-soak", "")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data["parent"])
                        nic = groups.group(0) if groups else ""
                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
                                                              split("-")[:-1]))
                        tbl_dict[tst_name_mod] = {
                            "name": name,
                            "ref-data": list(),
                            "cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod]["cmp-data"].append(
                            tst_data["throughput"]["LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                tst_name_mod = tst_name.replace("-ndrpdr", "").\
                    replace("-mrr", "")
                if tst_name_mod in tests_lst:
                    try:
                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                            if table["include-tests"] == "MRR":
                                result = tst_data["result"]["receive-rate"].avg
                            elif table["include-tests"] == "PDR":
                                result = tst_data["throughput"]["PDR"]["LOWER"]
                            elif table["include-tests"] == "NDR":
                                result = tst_data["throughput"]["NDR"]["LOWER"]
                            else:
                                result = None
                            if result is not None:
                                tbl_dict[tst_name_mod]["ref-data"].append(
                                    result)
                    except (KeyError, TypeError):
                        continue

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        item = [tbl_dict[tst_name]["name"], ]
        data_r = tbl_dict[tst_name]["ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name]["cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with both means present (and non-zero) get a delta and
        # make it into the table.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = "{0}.csv".format(table["output-file"])
    with open(csv_file, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + "\n")

    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a mangled listing; missing guard
    # lines and header items restored from context - verify against VCS.

    logging.info(" Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info(" Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Trend [Mpps]",
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
              "Progressions [#]"
              ]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                try:
                    tbl_dict[tst_name]["data"][str(build)] = \
                        tst_data["result"]["receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        data_t = tbl_dict[tst_name]["data"]
        # Anomaly classification needs at least two samples.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table["window"])
        long_win_size = min(len(data_t), table["long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if (isnan(last_avg) or
                    isnan(rel_change_last) or
                    isnan(rel_change_long)):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count("regression"),
                 classification_lst[-win_size:].count("progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by regression count, then progression count, then
    # short-term change.
    tbl_sorted = list()
    for nrr in range(table["window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table["window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])

    logging.info(" Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info(" Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def _generate_url(base, testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param base: The base part of URL common to all test cases.
    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type base: str
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    # NOTE(review): this excerpt is sampled — several branch bodies and the
    # initializations of url / nic / framesize / anchor / feature are not
    # visible here. The comments below describe only what the visible code
    # demonstrates.

    # Map the test-case name to the trending-plot file name and, where
    # applicable, the "feature" suffix that becomes part of the plot anchor.
    if "lbdpdk" in test_name or "lbvpp" in test_name:
        file_name = "link_bonding"
    elif "114b" in test_name and "vhost" in test_name:
        # (branch body elided in this excerpt)
    elif "testpmd" in test_name or "l3fwd" in test_name:
        # (branch body elided in this excerpt)
    elif "memif" in test_name:
        file_name = "container_memif"
    elif "srv6" in test_name:
        # (branch body elided in this excerpt)
    elif "vhost" in test_name:
        if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
            file_name = "vm_vhost_l2"
            if "114b" in test_name:
                # (branch body elided in this excerpt)
            elif "l2xcbase" in test_name and "x520" in test_name:
                feature = "-base-l2xc"
            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
                feature = "-base-l2bd"
        elif "ip4base" in test_name:
            file_name = "vm_vhost_ip4"
    elif "ipsecbasetnlsw" in test_name:
        file_name = "ipsecsw"
        feature = "-base-scale"
    elif "ipsec" in test_name:
        feature = "-base-scale"
        # hw-/sw- prefix selects the hardware vs software crypto plot file.
        if "hw-" in test_name:
            file_name = "ipsechw"
        elif "sw-" in test_name:
            file_name = "ipsecsw"
            if "-int-" in test_name:
                feature = "-base-scale-int"
            elif "tnl" in test_name:
                feature = "-base-scale-tnl"
    elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
        file_name = "ip4_tunnels"
    elif "ip4base" in test_name or "ip4scale" in test_name:
        if "xl710" in test_name:
            feature = "-base-scale-features"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        elif "snat" in test_name or "cop" in test_name:
            feature = "-features"
        # NOTE(review): the "else:" introducing this default is elided here.
            feature = "-base-scale"
    elif "ip6base" in test_name or "ip6scale" in test_name:
        feature = "-base-scale"
    elif "l2xcbase" in test_name or "l2xcscale" in test_name \
            or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
            or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
        if "macip" in test_name:
            feature = "-features-macip"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        # NOTE(review): the "else:" introducing this default is elided here.
            feature = "-base-scale"

    # NIC model encoded in the test name (assignments elided in this excerpt).
    if "x520" in test_name:
    elif "x710" in test_name:
    elif "xl710" in test_name:
    elif "xxv710" in test_name:
    elif "vic1227" in test_name:
    elif "vic1385" in test_name:
    elif "x553" in test_name:

    # Frame size part of the anchor (assignments elided in this excerpt).
    if "64b" in test_name:
    elif "78b" in test_name:
    elif "imix" in test_name:
    elif "9000b" in test_name:
    elif "1518b" in test_name:
    elif "114b" in test_name:
    anchor += framesize + '-'

    # Threads/cores part of the anchor (assignments elided in this excerpt).
    if "1t1c" in test_name:
    elif "2t2c" in test_name:
    elif "4t4c" in test_name:
    elif "2t1c" in test_name:
    elif "4t2c" in test_name:
    elif "8t4c" in test_name:

    # Compose the final URL: the "-int"/"-tnl" suffixes are stripped from the
    # file-name portion of the URL but kept in the trailing anchor "feature".
    return url + file_name + '-' + testbed + '-' + nic + framesize + \
        feature.replace("-int", "").replace("-tnl", "") + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file. Reads the CSV dashboard produced earlier and renders it as an HTML
    table embedded in an rST "raw" directive, with each test name linked to
    its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this excerpt is sampled — the guard/try/except/else lines
    # around several statements below are elided; indentation reflects the
    # visible structure only.

    testbed = table.get("testbed", None)
    # (the "if testbed is None:" guard is elided in this excerpt)
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the whole input CSV into a list of rows.
    # (the enclosing "try:" is elided in this excerpt)
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Root element of the generated HTML table.
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row: first column left-aligned, the rest centered.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Data rows: two alternating shades per classification so adjacent rows
    # of the same class are still distinguishable.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # (the conditions selecting the colour are elided in this excerpt)
            color = "regression"
            color = "progression"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Cells: the test-name cell is rendered as a link to its trending
        # plot generated by _generate_url().
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            url = _generate_url("../trending/", testbed, item)
            ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # Serialize the table into the output file, wrapped for rST inclusion.
    # (the enclosing "try:" is elided in this excerpt)
        with open(table["output-file"], 'w') as html_file:
            logging.info("  Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard))
            html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file. Produces a flat text listing of the
    tests which failed in the listed builds, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warn("  No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))
        # (the early "return" is elided in this excerpt)

    # NOTE(review): the initialization of tbl_list and several guard lines
    # ("continue", "if groups is None:", the enclosing try/except) are elided
    # in this sampled excerpt.
    for job, builds in table["data"].items():
        for build in builds:
            version = input_data.metadata(job, build).get("version", "")
            # (the guard that triggers this error is elided in this excerpt)
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
            # The build id and its version precede that build's failed tests.
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_data["status"] != "FAIL":
                # ("continue" elided in this excerpt)
                # Prefix the test name with the NIC model parsed from the
                # parent suite name.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("  Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file. Builds a CSV (and a pretty-printed
    text copy) summarising, per test, how often it failed within the
    configured time window and when/where it failed last.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]

    # Generate the data for the table according to the model in the table
    # Only builds generated within this sliding window (default 7 days) are
    # taken into account.
    timeperiod = timedelta(int(table.get("window", 7)))

    # NOTE(review): the initializations of tbl_dict / now, the enclosing
    # "try:", some guard lines and part of the stored tuple are elided in
    # this sampled excerpt.
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].iteritems():
                if tst_name.lower() in table.get("ignore-list", list()):
                # ("continue" elided in this excerpt)
                if tbl_dict.get(tst_name, None) is None:
                    # First occurrence: register the test under a name
                    # prefixed with the NIC model from the parent suite.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                generated = input_data.metadata(job, build).\
                    get("generated", "")
                # Keep only builds generated within the time window.
                then = dt.strptime(generated, "%Y%m%d %H:%M")
                if (now - then) <= timeperiod:
                    tbl_dict[tst_name]["data"][build] = (
                        # (leading tuple elements elided in this excerpt)
                        input_data.metadata(job, build).get("version", ""),
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    # Count the failures per test and remember the most recent one; val is
    # the tuple stored above: val[0] status, val[1..3] time/VPP/CSIT ids.
    for tst_data in tbl_dict.values():
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        max_fails = fails_nr if fails_nr > max_fails else max_fails
        tbl_lst.append([tst_data["name"],
                        # (middle columns elided in this excerpt)
                        "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by last-failure time, then stable-bucket by failure count so the
    # most frequently failing tests come first.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("  Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Also emit a human-readable text rendering of the same CSV.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("  Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file. Reads the failed-tests CSV and
    renders it as an HTML table embedded in an rST "raw" directive, linking
    each test name to its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # NOTE(review): this excerpt is sampled — guard/try/except/else lines
    # around several statements below are elided; indentation reflects the
    # visible structure only.

    testbed = table.get("testbed", None)
    # (the "if testbed is None:" guard is elided in this excerpt)
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the whole input CSV into a list of rows.
    # (the enclosing "try:" is elided in this excerpt)
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Root element of the generated HTML table.
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row: first column left-aligned, the rest centered.
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Data rows: two alternating background shades only (no per-class
    # colouring here, unlike the trending dashboard).
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Cells: the test-name cell is rendered as a link to its trending
        # plot generated by _generate_url().
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            url = _generate_url("../trending/", testbed, item)
            ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # Serialize the table into the output file, wrapped for rST inclusion.
    # (the enclosing "try:" is elided in this excerpt)
        with open(table["output-file"], 'w') as html_file:
            logging.info("  Writing file: '{0}'".format(table["output-file"]))
            html_file.write(".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests))
            html_file.write("\n\t<p><br><br></p>\n")
        logging.warning("The output file is not defined.")