1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from string import replace
23 from collections import OrderedDict
24 from numpy import nan, isnan
25 from xml.etree import ElementTree as ET
26 from datetime import datetime as dt
27 from datetime import timedelta
29 from utils import mean, stdev, relative_change, classify_anomalies, \
30 convert_csv_to_pretty_txt, relative_change_stdev
33 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry names its generating algorithm; the algorithm is a
    module-level function taking (table, data).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    logging.info("Generating the tables ...")
    for table in spec.tables:
        try:
            # SECURITY NOTE: the algorithm name comes from the specification
            # file. The original code ran eval() on it, which executes
            # arbitrary expressions; an explicit name lookup keeps the
            # dispatch-by-name behavior without that risk.
            algorithm = globals().get(table["algorithm"], None)
            if not callable(algorithm):
                raise NameError(
                    "name '{alg}' is not defined".
                    format(alg=table["algorithm"]))
            algorithm(table, data)
        except NameError as err:
            # A misspelled / unimplemented algorithm must not abort the
            # whole report generation; log it and continue with next table.
            logging.error("Probably algorithm '{alg}' is not defined: {err}".
                          format(alg=table["algorithm"], err=repr(err)))
    logging.info("Done.")
55 def table_details(table, input_data):
56 """Generate the table(s) with algorithm: table_detailed_test_results
57 specified in the specification file.
59 :param table: Table to generate.
60 :param input_data: Data to process.
61 :type table: pandas.Series
62 :type input_data: InputData
65 logging.info("  Generating the table {0} ...".
66 format(table.get("title", "")))
# Filter the input data down to the rows this table asks for.
69 logging.info("    Creating the data set for the {0} '{1}'.".
70 format(table.get("type", ""), table.get("title", "")))
71 data = input_data.filter_data(table)
73 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are doubled ("").
75 for column in table["columns"]:
76 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
78 # Generate the data for the table according to the model in the table
# NOTE(review): dict.keys()[0] and iteritems() below are Python-2-only
# idioms; under Python 3 keys() is a non-indexable view and iteritems()
# does not exist.
80 job = table["data"].keys()[0]
81 build = str(table["data"][job][0])
83 suites = input_data.suites(job, build)
85 logging.error("    No data available. The table will not be generated.")
88 for suite_longname, suite in suites.iteritems():
90 suite_name = suite["name"]
92 for test in data[job][build].keys():
93 if data[job][build][test]["parent"] in suite_name:
95 for column in table["columns"]:
# column["data"] is a space-separated spec string; its second token is
# the key looked up in the per-test data dict.
97 col_data = str(data[job][build][test][column["data"].
98 split(" ")[1]]).replace('"', '""')
99 if column["data"].split(" ")[1] in ("conf-history",
# NOTE(review): replace() here is the Python-2-only function imported
# from the string module, not the str method.
101 col_data = replace(col_data, " |br| ", "",
103 col_data = " |prein| {0} |preout| ".\
104 format(col_data[:-5])
105 row_lst.append('"{0}"'.format(col_data))
107 row_lst.append("No data")
108 table_lst.append(row_lst)
110 # Write the data to file
# One CSV file per suite: <output-file>_<suite-name><output-file-ext>.
112 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
113 table["output-file-ext"])
114 logging.info("      Writing file: '{}'".format(file_name))
115 with open(file_name, "w") as file_handler:
116 file_handler.write(",".join(header) + "\n")
117 for item in table_lst:
118 file_handler.write(",".join(item) + "\n")
120 logging.info("  Done.")
123 def table_merged_details(table, input_data):
124 """Generate the table(s) with algorithm: table_merged_details
125 specified in the specification file.
127 :param table: Table to generate.
128 :param input_data: Data to process.
129 :type table: pandas.Series
130 :type input_data: InputData
133 logging.info("  Generating the table {0} ...".
134 format(table.get("title", "")))
# Filter and merge test data from all sources, then sort for a stable
# output order.
137 logging.info("    Creating the data set for the {0} '{1}'.".
138 format(table.get("type", ""), table.get("title", "")))
139 data = input_data.filter_data(table)
140 data = input_data.merge_data(data)
141 data.sort_index(inplace=True)
143 logging.info("    Creating the data set for the {0} '{1}'.".
144 format(table.get("type", ""), table.get("title", "")))
145 suites = input_data.filter_data(table, data_set="suites")
146 suites = input_data.merge_data(suites)
148 # Prepare the header of the tables
# CSV-quote the column titles (embedded double quotes doubled).
150 for column in table["columns"]:
151 header.append('"{0}"'.format(str(column["title"]).replace('"', '""')))
# NOTE(review): iteritems() and the string-module replace() used below
# are Python 2 only.
153 for _, suite in suites.iteritems():
155 suite_name = suite["name"]
157 for test in data.keys():
158 if data[test]["parent"] in suite_name:
160 for column in table["columns"]:
# The second token of column["data"] selects the key in the test data.
162 col_data = str(data[test][column["data"].
163 split(" ")[1]]).replace('"', '""')
164 col_data = replace(col_data, "No Data",
166 if column["data"].split(" ")[1] in ("conf-history",
168 col_data = replace(col_data, " |br| ", "",
170 col_data = " |prein| {0} |preout| ".\
171 format(col_data[:-5])
172 row_lst.append('"{0}"'.format(col_data))
174 row_lst.append('"Not captured"')
175 table_lst.append(row_lst)
177 # Write the data to file
# One CSV file per suite: <output-file>_<suite-name><output-file-ext>.
179 file_name = "{0}_{1}{2}".format(table["output-file"], suite_name,
180 table["output-file-ext"])
181 logging.info("      Writing file: '{}'".format(file_name))
182 with open(file_name, "w") as file_handler:
183 file_handler.write(",".join(header) + "\n")
184 for item in table_lst:
185 file_handler.write(",".join(item) + "\n")
187 logging.info("  Done.")
190 def table_performance_comparison(table, input_data):
191 """Generate the table(s) with algorithm: table_performance_comparison
192 specified in the specification file.
194 :param table: Table to generate.
195 :param input_data: Data to process.
196 :type table: pandas.Series
197 :type input_data: InputData
200 logging.info("  Generating the table {0} ...".
201 format(table.get("title", "")))
204 logging.info("    Creating the data set for the {0} '{1}'.".
205 format(table.get("type", ""), table.get("title", "")))
206 data = input_data.filter_data(table, continue_on_error=True)
208 # Prepare the header of the tables
# Header layout: test name, optional history columns (mean+stdev per
# history item), then reference mean+stdev, compare mean+stdev, delta.
210 header = ["Test case", ]
212 if table["include-tests"] == "MRR":
213 hdr_param = "Receive Rate"
215 hdr_param = "Throughput"
217 history = table.get("history", None)
221 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
222 "{0} Stdev [Mpps]".format(item["title"])])
224 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
225 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
226 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
227 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
229 header_str = ",".join(header) + "\n"
230 except (AttributeError, KeyError) as err:
231 logging.error("The model is invalid, missing parameter: {0}".
235 # Prepare data to the table:
# Pass 1: collect reference samples. Test names are normalized (rate-type
# suffixes like -ndrpdr and thread-count markers like 2t1c stripped) so
# the same test matches across builds/testbeds.
# NOTE(review): iteritems() below is Python 2 only.
237 for job, builds in table["reference"]["data"].items():
239 for tst_name, tst_data in data[job][str(build)].iteritems():
240 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
241 replace("-ndrpdr", "").replace("-pdrdisc", "").\
242 replace("-ndrdisc", "").replace("-pdr", "").\
243 replace("-ndr", "").\
244 replace("1t1c", "1c").replace("2t1c", "1c").\
245 replace("2t2c", "2c").replace("4t2c", "2c").\
246 replace("4t4c", "4c").replace("8t4c", "4c")
247 if "across topologies" in table["title"].lower():
248 tst_name_mod = tst_name_mod.replace("2n1l-", "")
249 if tbl_dict.get(tst_name_mod, None) is None:
250 groups = re.search(REGEX_NIC, tst_data["parent"])
251 nic = groups.group(0) if groups else ""
252 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
254 if "across testbeds" in table["title"].lower() or \
255 "across topologies" in table["title"].lower():
257 replace("1t1c", "1c").replace("2t1c", "1c").\
258 replace("2t2c", "2c").replace("4t2c", "2c").\
259 replace("4t4c", "4c").replace("8t4c", "4c")
260 tbl_dict[tst_name_mod] = {"name": name,
264 # TODO: Re-work when NDRPDRDISC tests are not used
265 if table["include-tests"] == "MRR":
266 tbl_dict[tst_name_mod]["ref-data"]. \
267 append(tst_data["result"]["receive-rate"].avg)
268 elif table["include-tests"] == "PDR":
269 if tst_data["type"] == "PDR":
270 tbl_dict[tst_name_mod]["ref-data"]. \
271 append(tst_data["throughput"]["value"])
272 elif tst_data["type"] == "NDRPDR":
273 tbl_dict[tst_name_mod]["ref-data"].append(
274 tst_data["throughput"]["PDR"]["LOWER"])
275 elif table["include-tests"] == "NDR":
276 if tst_data["type"] == "NDR":
277 tbl_dict[tst_name_mod]["ref-data"]. \
278 append(tst_data["throughput"]["value"])
279 elif tst_data["type"] == "NDRPDR":
280 tbl_dict[tst_name_mod]["ref-data"].append(
281 tst_data["throughput"]["NDR"]["LOWER"])
285 pass  # No data in output.xml for this test
# Pass 2: collect compare samples with the same name normalization.
287 for job, builds in table["compare"]["data"].items():
289 for tst_name, tst_data in data[job][str(build)].iteritems():
290 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
291 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
292 replace("-ndrdisc", "").replace("-pdr", ""). \
293 replace("-ndr", "").\
294 replace("1t1c", "1c").replace("2t1c", "1c").\
295 replace("2t2c", "2c").replace("4t2c", "2c").\
296 replace("4t4c", "4c").replace("8t4c", "4c")
297 if "across topologies" in table["title"].lower():
298 tst_name_mod = tst_name_mod.replace("2n1l-", "")
299 if tbl_dict.get(tst_name_mod, None) is None:
300 groups = re.search(REGEX_NIC, tst_data["parent"])
301 nic = groups.group(0) if groups else ""
302 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
304 if "across testbeds" in table["title"].lower() or \
305 "across topologies" in table["title"].lower():
307 replace("1t1c", "1c").replace("2t1c", "1c").\
308 replace("2t2c", "2c").replace("4t2c", "2c").\
309 replace("4t4c", "4c").replace("8t4c", "4c")
310 tbl_dict[tst_name_mod] = {"name": name,
314 # TODO: Re-work when NDRPDRDISC tests are not used
315 if table["include-tests"] == "MRR":
316 tbl_dict[tst_name_mod]["cmp-data"]. \
317 append(tst_data["result"]["receive-rate"].avg)
318 elif table["include-tests"] == "PDR":
319 if tst_data["type"] == "PDR":
320 tbl_dict[tst_name_mod]["cmp-data"]. \
321 append(tst_data["throughput"]["value"])
322 elif tst_data["type"] == "NDRPDR":
323 tbl_dict[tst_name_mod]["cmp-data"].append(
324 tst_data["throughput"]["PDR"]["LOWER"])
325 elif table["include-tests"] == "NDR":
326 if tst_data["type"] == "NDR":
327 tbl_dict[tst_name_mod]["cmp-data"]. \
328 append(tst_data["throughput"]["value"])
329 elif tst_data["type"] == "NDRPDR":
330 tbl_dict[tst_name_mod]["cmp-data"].append(
331 tst_data["throughput"]["NDR"]["LOWER"])
334 except (KeyError, TypeError):
# Pass 3: optional per-item "history" columns, keyed by the history
# item's title inside each test's OrderedDict.
338 for job, builds in item["data"].items():
340 for tst_name, tst_data in data[job][str(build)].iteritems():
341 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
342 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
343 replace("-ndrdisc", "").replace("-pdr", ""). \
344 replace("-ndr", "").\
345 replace("1t1c", "1c").replace("2t1c", "1c").\
346 replace("2t2c", "2c").replace("4t2c", "2c").\
347 replace("4t4c", "4c").replace("8t4c", "4c")
348 if "across topologies" in table["title"].lower():
349 tst_name_mod = tst_name_mod.replace("2n1l-", "")
350 if tbl_dict.get(tst_name_mod, None) is None:
352 if tbl_dict[tst_name_mod].get("history", None) is None:
353 tbl_dict[tst_name_mod]["history"] = OrderedDict()
354 if tbl_dict[tst_name_mod]["history"].get(item["title"],
356 tbl_dict[tst_name_mod]["history"][item["title"]] = \
359 # TODO: Re-work when NDRPDRDISC tests are not used
360 if table["include-tests"] == "MRR":
361 tbl_dict[tst_name_mod]["history"][item["title"
362 ]].append(tst_data["result"]["receive-rate"].
364 elif table["include-tests"] == "PDR":
365 if tst_data["type"] == "PDR":
366 tbl_dict[tst_name_mod]["history"][
368 append(tst_data["throughput"]["value"])
369 elif tst_data["type"] == "NDRPDR":
370 tbl_dict[tst_name_mod]["history"][item[
371 "title"]].append(tst_data["throughput"][
373 elif table["include-tests"] == "NDR":
374 if tst_data["type"] == "NDR":
375 tbl_dict[tst_name_mod]["history"][
377 append(tst_data["throughput"]["value"])
378 elif tst_data["type"] == "NDRPDR":
379 tbl_dict[tst_name_mod]["history"][item[
380 "title"]].append(tst_data["throughput"][
384 except (TypeError, KeyError):
# Aggregate: mean/stdev in Mpps (samples are pps, hence / 1000000),
# then the relative change reference -> compare.
388 for tst_name in tbl_dict.keys():
389 item = [tbl_dict[tst_name]["name"], ]
391 if tbl_dict[tst_name].get("history", None) is not None:
392 for hist_data in tbl_dict[tst_name]["history"].values():
394 item.append(round(mean(hist_data) / 1000000, 2))
395 item.append(round(stdev(hist_data) / 1000000, 2))
397 item.extend([None, None])
399 item.extend([None, None])
400 data_t = tbl_dict[tst_name]["ref-data"]
402 item.append(round(mean(data_t) / 1000000, 2))
403 item.append(round(stdev(data_t) / 1000000, 2))
405 item.extend([None, None])
406 data_t = tbl_dict[tst_name]["cmp-data"]
408 item.append(round(mean(data_t) / 1000000, 2))
409 item.append(round(stdev(data_t) / 1000000, 2))
411 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean.
412 if "dot1q" in tbl_dict[tst_name]["name"]:
413 item.append("Changed methodology")
414 elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
415 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
418 if (len(item) == len(header)) and (item[-3] is not None):
421 # Sort the table according to the relative change
422 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
424 # Generate csv tables:
425 csv_file = "{0}.csv".format(table["output-file"])
426 with open(csv_file, "w") as file_handler:
427 file_handler.write(header_str)
429 file_handler.write(",".join([str(item) for item in test]) + "\n")
431 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
434 def table_performance_comparison_nic(table, input_data):
435 """Generate the table(s) with algorithm: table_performance_comparison
436 specified in the specification file.
438 :param table: Table to generate.
439 :param input_data: Data to process.
440 :type table: pandas.Series
441 :type input_data: InputData
444 logging.info("  Generating the table {0} ...".
445 format(table.get("title", "")))
448 logging.info("    Creating the data set for the {0} '{1}'.".
449 format(table.get("type", ""), table.get("title", "")))
450 data = input_data.filter_data(table, continue_on_error=True)
452 # Prepare the header of the tables
# Same header layout as table_performance_comparison; this variant
# additionally restricts each data set to a single NIC (via tags).
454 header = ["Test case", ]
456 if table["include-tests"] == "MRR":
457 hdr_param = "Receive Rate"
459 hdr_param = "Throughput"
461 history = table.get("history", None)
465 ["{0} {1} [Mpps]".format(item["title"], hdr_param),
466 "{0} Stdev [Mpps]".format(item["title"])])
468 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
469 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
470 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
471 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
473 header_str = ",".join(header) + "\n"
474 except (AttributeError, KeyError) as err:
475 logging.error("The model is invalid, missing parameter: {0}".
479 # Prepare data to the table:
# Pass 1: reference samples, keeping only tests tagged with the
# reference NIC; the NIC token is then stripped from the key so tests
# match across NICs. NOTE(review): iteritems() is Python 2 only.
481 for job, builds in table["reference"]["data"].items():
483 for tst_name, tst_data in data[job][str(build)].iteritems():
484 if table["reference"]["nic"] not in tst_data["tags"]:
486 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
487 replace("-ndrpdr", "").replace("-pdrdisc", "").\
488 replace("-ndrdisc", "").replace("-pdr", "").\
489 replace("-ndr", "").\
490 replace("1t1c", "1c").replace("2t1c", "1c").\
491 replace("2t2c", "2c").replace("4t2c", "2c").\
492 replace("4t4c", "4c").replace("8t4c", "4c")
493 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
494 if "across topologies" in table["title"].lower():
495 tst_name_mod = tst_name_mod.replace("2n1l-", "")
496 if tbl_dict.get(tst_name_mod, None) is None:
497 name = "{0}".format("-".join(tst_data["name"].
499 if "across testbeds" in table["title"].lower() or \
500 "across topologies" in table["title"].lower():
502 replace("1t1c", "1c").replace("2t1c", "1c").\
503 replace("2t2c", "2c").replace("4t2c", "2c").\
504 replace("4t4c", "4c").replace("8t4c", "4c")
505 tbl_dict[tst_name_mod] = {"name": name,
509 # TODO: Re-work when NDRPDRDISC tests are not used
510 if table["include-tests"] == "MRR":
511 tbl_dict[tst_name_mod]["ref-data"]. \
512 append(tst_data["result"]["receive-rate"].avg)
513 elif table["include-tests"] == "PDR":
514 if tst_data["type"] == "PDR":
515 tbl_dict[tst_name_mod]["ref-data"]. \
516 append(tst_data["throughput"]["value"])
517 elif tst_data["type"] == "NDRPDR":
518 tbl_dict[tst_name_mod]["ref-data"].append(
519 tst_data["throughput"]["PDR"]["LOWER"])
520 elif table["include-tests"] == "NDR":
521 if tst_data["type"] == "NDR":
522 tbl_dict[tst_name_mod]["ref-data"]. \
523 append(tst_data["throughput"]["value"])
524 elif tst_data["type"] == "NDRPDR":
525 tbl_dict[tst_name_mod]["ref-data"].append(
526 tst_data["throughput"]["NDR"]["LOWER"])
530 pass  # No data in output.xml for this test
# Pass 2: compare samples, filtered by the compare NIC tag.
532 for job, builds in table["compare"]["data"].items():
534 for tst_name, tst_data in data[job][str(build)].iteritems():
535 if table["compare"]["nic"] not in tst_data["tags"]:
537 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
538 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
539 replace("-ndrdisc", "").replace("-pdr", ""). \
540 replace("-ndr", "").\
541 replace("1t1c", "1c").replace("2t1c", "1c").\
542 replace("2t2c", "2c").replace("4t2c", "2c").\
543 replace("4t4c", "4c").replace("8t4c", "4c")
544 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
545 if "across topologies" in table["title"].lower():
546 tst_name_mod = tst_name_mod.replace("2n1l-", "")
547 if tbl_dict.get(tst_name_mod, None) is None:
548 name = "{0}".format("-".join(tst_data["name"].
550 if "across testbeds" in table["title"].lower() or \
551 "across topologies" in table["title"].lower():
553 replace("1t1c", "1c").replace("2t1c", "1c").\
554 replace("2t2c", "2c").replace("4t2c", "2c").\
555 replace("4t4c", "4c").replace("8t4c", "4c")
556 tbl_dict[tst_name_mod] = {"name": name,
560 # TODO: Re-work when NDRPDRDISC tests are not used
561 if table["include-tests"] == "MRR":
562 tbl_dict[tst_name_mod]["cmp-data"]. \
563 append(tst_data["result"]["receive-rate"].avg)
564 elif table["include-tests"] == "PDR":
565 if tst_data["type"] == "PDR":
566 tbl_dict[tst_name_mod]["cmp-data"]. \
567 append(tst_data["throughput"]["value"])
568 elif tst_data["type"] == "NDRPDR":
569 tbl_dict[tst_name_mod]["cmp-data"].append(
570 tst_data["throughput"]["PDR"]["LOWER"])
571 elif table["include-tests"] == "NDR":
572 if tst_data["type"] == "NDR":
573 tbl_dict[tst_name_mod]["cmp-data"]. \
574 append(tst_data["throughput"]["value"])
575 elif tst_data["type"] == "NDRPDR":
576 tbl_dict[tst_name_mod]["cmp-data"].append(
577 tst_data["throughput"]["NDR"]["LOWER"])
580 except (KeyError, TypeError):
# Pass 3: optional per-item "history" columns, also NIC-filtered.
585 for job, builds in item["data"].items():
587 for tst_name, tst_data in data[job][str(build)].iteritems():
588 if item["nic"] not in tst_data["tags"]:
590 tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
591 replace("-ndrpdr", "").replace("-pdrdisc", ""). \
592 replace("-ndrdisc", "").replace("-pdr", ""). \
593 replace("-ndr", "").\
594 replace("1t1c", "1c").replace("2t1c", "1c").\
595 replace("2t2c", "2c").replace("4t2c", "2c").\
596 replace("4t4c", "4c").replace("8t4c", "4c")
597 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
598 if "across topologies" in table["title"].lower():
599 tst_name_mod = tst_name_mod.replace("2n1l-", "")
600 if tbl_dict.get(tst_name_mod, None) is None:
602 if tbl_dict[tst_name_mod].get("history", None) is None:
603 tbl_dict[tst_name_mod]["history"] = OrderedDict()
604 if tbl_dict[tst_name_mod]["history"].get(item["title"],
606 tbl_dict[tst_name_mod]["history"][item["title"]] = \
609 # TODO: Re-work when NDRPDRDISC tests are not used
610 if table["include-tests"] == "MRR":
611 tbl_dict[tst_name_mod]["history"][item["title"
612 ]].append(tst_data["result"]["receive-rate"].
614 elif table["include-tests"] == "PDR":
615 if tst_data["type"] == "PDR":
616 tbl_dict[tst_name_mod]["history"][
618 append(tst_data["throughput"]["value"])
619 elif tst_data["type"] == "NDRPDR":
620 tbl_dict[tst_name_mod]["history"][item[
621 "title"]].append(tst_data["throughput"][
623 elif table["include-tests"] == "NDR":
624 if tst_data["type"] == "NDR":
625 tbl_dict[tst_name_mod]["history"][
627 append(tst_data["throughput"]["value"])
628 elif tst_data["type"] == "NDRPDR":
629 tbl_dict[tst_name_mod]["history"][item[
630 "title"]].append(tst_data["throughput"][
634 except (TypeError, KeyError):
# Aggregate: mean/stdev in Mpps (/ 1000000) and the relative change
# reference -> compare; item[-4]/item[-2] are the ref/cmp means.
638 for tst_name in tbl_dict.keys():
639 item = [tbl_dict[tst_name]["name"], ]
641 if tbl_dict[tst_name].get("history", None) is not None:
642 for hist_data in tbl_dict[tst_name]["history"].values():
644 item.append(round(mean(hist_data) / 1000000, 2))
645 item.append(round(stdev(hist_data) / 1000000, 2))
647 item.extend([None, None])
649 item.extend([None, None])
650 data_t = tbl_dict[tst_name]["ref-data"]
652 item.append(round(mean(data_t) / 1000000, 2))
653 item.append(round(stdev(data_t) / 1000000, 2))
655 item.extend([None, None])
656 data_t = tbl_dict[tst_name]["cmp-data"]
658 item.append(round(mean(data_t) / 1000000, 2))
659 item.append(round(stdev(data_t) / 1000000, 2))
661 item.extend([None, None])
662 if "dot1q" in tbl_dict[tst_name]["name"]:
663 item.append("Changed methodology")
664 elif item[-4] is not None and item[-2] is not None and item[-4] != 0:
665 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
668 if (len(item) == len(header)) and (item[-3] is not None):
671 # Sort the table according to the relative change
672 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
674 # Generate csv tables:
675 csv_file = "{0}.csv".format(table["output-file"])
676 with open(csv_file, "w") as file_handler:
677 file_handler.write(header_str)
679 file_handler.write(",".join([str(item) for item in test]) + "\n")
681 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
684 def table_nics_comparison(table, input_data):
685 """Generate the table(s) with algorithm: table_nics_comparison
686 specified in the specification file.
688 :param table: Table to generate.
689 :param input_data: Data to process.
690 :type table: pandas.Series
691 :type input_data: InputData
694 logging.info("  Generating the table {0} ...".
695 format(table.get("title", "")))
698 logging.info("    Creating the data set for the {0} '{1}'.".
699 format(table.get("type", ""), table.get("title", "")))
700 data = input_data.filter_data(table, continue_on_error=True)
702 # Prepare the header of the tables
704 header = ["Test case", ]
706 if table["include-tests"] == "MRR":
707 hdr_param = "Receive Rate"
709 hdr_param = "Throughput"
712 ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
713 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
714 "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
715 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
717 header_str = ",".join(header) + "\n"
718 except (AttributeError, KeyError) as err:
719 logging.error("The model is invalid, missing parameter: {0}".
723 # Prepare data to the table:
# Single pass over one data set; each sample is routed to ref-data or
# cmp-data by the NIC tag on the test.
# NOTE(review): iteritems() below is Python 2 only.
725 for job, builds in table["data"].items():
727 for tst_name, tst_data in data[job][str(build)].iteritems():
# Strip rate-type suffixes, thread-count markers and the NIC token so
# the same test on both NICs collapses to one key.
728 tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
729 replace("-ndrpdr", "").replace("-pdrdisc", "").\
730 replace("-ndrdisc", "").replace("-pdr", "").\
731 replace("-ndr", "").\
732 replace("1t1c", "1c").replace("2t1c", "1c").\
733 replace("2t2c", "2c").replace("4t2c", "2c").\
734 replace("4t4c", "4c").replace("8t4c", "4c")
735 tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
736 if tbl_dict.get(tst_name_mod, None) is None:
737 name = "-".join(tst_data["name"].split("-")[:-1])
738 tbl_dict[tst_name_mod] = {"name": name,
742 if table["include-tests"] == "MRR":
743 result = tst_data["result"]["receive-rate"].avg
744 elif table["include-tests"] == "PDR":
745 result = tst_data["throughput"]["PDR"]["LOWER"]
746 elif table["include-tests"] == "NDR":
747 result = tst_data["throughput"]["NDR"]["LOWER"]
752 if table["reference"]["nic"] in tst_data["tags"]:
753 tbl_dict[tst_name_mod]["ref-data"].append(result)
754 elif table["compare"]["nic"] in tst_data["tags"]:
755 tbl_dict[tst_name_mod]["cmp-data"].append(result)
756 except (TypeError, KeyError) as err:
757 logging.debug("No data for {0}".format(tst_name))
758 logging.debug(repr(err))
759 # No data in output.xml for this test
# Aggregate: mean/stdev in Mpps; item[-4]/item[-2] are the ref/cmp
# means used for the relative change.
762 for tst_name in tbl_dict.keys():
763 item = [tbl_dict[tst_name]["name"], ]
764 data_t = tbl_dict[tst_name]["ref-data"]
766 item.append(round(mean(data_t) / 1000000, 2))
767 item.append(round(stdev(data_t) / 1000000, 2))
769 item.extend([None, None])
770 data_t = tbl_dict[tst_name]["cmp-data"]
772 item.append(round(mean(data_t) / 1000000, 2))
773 item.append(round(stdev(data_t) / 1000000, 2))
775 item.extend([None, None])
776 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
777 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
778 if len(item) == len(header):
781 # Sort the table according to the relative change
782 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
784 # Generate csv tables:
785 csv_file = "{0}.csv".format(table["output-file"])
786 with open(csv_file, "w") as file_handler:
787 file_handler.write(header_str)
789 file_handler.write(",".join([str(item) for item in test]) + "\n")
791 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
794 def table_soak_vs_ndr(table, input_data):
795 """Generate the table(s) with algorithm: table_soak_vs_ndr
796 specified in the specification file.
798 :param table: Table to generate.
799 :param input_data: Data to process.
800 :type table: pandas.Series
801 :type input_data: InputData
804 logging.info("  Generating the table {0} ...".
805 format(table.get("title", "")))
808 logging.info("    Creating the data set for the {0} '{1}'.".
809 format(table.get("type", ""), table.get("title", "")))
810 data = input_data.filter_data(table, continue_on_error=True)
812 # Prepare the header of the table
816 "{0} Throughput [Mpps]".format(table["reference"]["title"]),
817 "{0} Stdev [Mpps]".format(table["reference"]["title"]),
818 "{0} Throughput [Mpps]".format(table["compare"]["title"]),
819 "{0} Stdev [Mpps]".format(table["compare"]["title"]),
820 "Delta [%]", "Stdev of delta [%]"]
821 header_str = ",".join(header) + "\n"
822 except (AttributeError, KeyError) as err:
823 logging.error("The model is invalid, missing parameter: {0}".
827 # Create a list of available SOAK test results:
# SOAK results form the compare column; the keys (with the -soak suffix
# stripped) decide which NDR/PDR tests are collected afterwards.
# NOTE(review): iteritems() below is Python 2 only.
829 for job, builds in table["compare"]["data"].items():
831 for tst_name, tst_data in data[job][str(build)].iteritems():
832 if tst_data["type"] == "SOAK":
833 tst_name_mod = tst_name.replace("-soak", "")
834 if tbl_dict.get(tst_name_mod, None) is None:
835 groups = re.search(REGEX_NIC, tst_data["parent"])
836 nic = groups.group(0) if groups else ""
837 name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
839 tbl_dict[tst_name_mod] = {
845 tbl_dict[tst_name_mod]["cmp-data"].append(
846 tst_data["throughput"]["LOWER"])
847 except (KeyError, TypeError):
849 tests_lst = tbl_dict.keys()
851 # Add corresponding NDR test results:
# Only tests whose normalized name matched a SOAK test are kept.
852 for job, builds in table["reference"]["data"].items():
854 for tst_name, tst_data in data[job][str(build)].iteritems():
855 tst_name_mod = tst_name.replace("-ndrpdr", "").\
857 if tst_name_mod in tests_lst:
859 if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
860 if table["include-tests"] == "MRR":
861 result = tst_data["result"]["receive-rate"].avg
862 elif table["include-tests"] == "PDR":
863 result = tst_data["throughput"]["PDR"]["LOWER"]
864 elif table["include-tests"] == "NDR":
865 result = tst_data["throughput"]["NDR"]["LOWER"]
868 if result is not None:
869 tbl_dict[tst_name_mod]["ref-data"].append(
871 except (KeyError, TypeError):
# Aggregate: means/stdevs in Mpps, then relative change with propagated
# standard deviation (relative_change_stdev).
875 for tst_name in tbl_dict.keys():
876 item = [tbl_dict[tst_name]["name"], ]
877 data_r = tbl_dict[tst_name]["ref-data"]
879 data_r_mean = mean(data_r)
880 item.append(round(data_r_mean / 1000000, 2))
881 data_r_stdev = stdev(data_r)
882 item.append(round(data_r_stdev / 1000000, 2))
886 item.extend([None, None])
887 data_c = tbl_dict[tst_name]["cmp-data"]
889 data_c_mean = mean(data_c)
890 item.append(round(data_c_mean / 1000000, 2))
891 data_c_stdev = stdev(data_c)
892 item.append(round(data_c_stdev / 1000000, 2))
896 item.extend([None, None])
897 if data_r_mean and data_c_mean:
898 delta, d_stdev = relative_change_stdev(
899 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
900 item.append(round(delta, 2))
901 item.append(round(d_stdev, 2))
904 # Sort the table according to the relative change
905 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
907 # Generate csv tables:
908 csv_file = "{0}.csv".format(table["output-file"])
909 with open(csv_file, "w") as file_handler:
910 file_handler.write(header_str)
912 file_handler.write(",".join([str(item) for item in test]) + "\n")
914 convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
917 def table_performance_trending_dashboard(table, input_data):
918 """Generate the table(s) with algorithm:
919 table_performance_trending_dashboard
920 specified in the specification file.
922 :param table: Table to generate.
923 :param input_data: Data to process.
924 :type table: pandas.Series
925 :type input_data: InputData
928 logging.info("  Generating the table {0} ...".
929 format(table.get("title", "")))
932 logging.info("    Creating the data set for the {0} '{1}'.".
933 format(table.get("type", ""), table.get("title", "")))
934 data = input_data.filter_data(table, continue_on_error=True)
936 # Prepare the header of the tables
937 header = ["Test Case",
939 "Short-Term Change [%]",
940 "Long-Term Change [%]",
944 header_str = ",".join(header) + "\n"
946 # Prepare data to the table:
# Collect per-build receive-rate samples keyed by test name, skipping
# tests listed in the table's ignore-list.
# NOTE(review): iteritems() below is Python 2 only.
948 for job, builds in table["data"].items():
950 for tst_name, tst_data in data[job][str(build)].iteritems():
951 if tst_name.lower() in table.get("ignore-list", list()):
953 if tbl_dict.get(tst_name, None) is None:
954 groups = re.search(REGEX_NIC, tst_data["parent"])
957 nic = groups.group(0)
958 tbl_dict[tst_name] = {
959 "name": "{0}-{1}".format(nic, tst_data["name"]),
960 "data": OrderedDict()}
962 tbl_dict[tst_name]["data"][str(build)] = \
963 tst_data["result"]["receive-rate"]
964 except (TypeError, KeyError):
965 pass  # No data in output.xml for this test
# Classify each series, then compute the short-term change (vs. the
# average one window ago) and long-term change (vs. the maximum average
# over the long window). NaN guards avoid division by zero/NaN.
968 for tst_name in tbl_dict.keys():
969 data_t = tbl_dict[tst_name]["data"]
973 classification_lst, avgs = classify_anomalies(data_t)
975 win_size = min(len(data_t), table["window"])
976 long_win_size = min(len(data_t), table["long-trend-window"])
980 [x for x in avgs[-long_win_size:-win_size]
985 avg_week_ago = avgs[max(-win_size, -len(avgs))]
987 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
988 rel_change_last = nan
990 rel_change_last = round(
991 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
993 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
994 rel_change_long = nan
996 rel_change_long = round(
997 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
999 if classification_lst:
1000 if isnan(rel_change_last) and isnan(rel_change_long):
1002 if (isnan(last_avg) or
1003 isnan(rel_change_last) or
1004 isnan(rel_change_long)):
1007 [tbl_dict[tst_name]["name"],
1008 round(last_avg / 1000000, 2),
1011 classification_lst[-win_size:].count("regression"),
1012 classification_lst[-win_size:].count("progression")])
1014 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-level ordering: most regressions first, then most
# progressions, then by short-term change within each bucket.
1017 for nrr in range(table["window"], -1, -1):
1018 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1019 for nrp in range(table["window"], -1, -1):
1020 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1021 tbl_out.sort(key=lambda rel: rel[2])
1022 tbl_sorted.extend(tbl_out)
1024 file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
1026 logging.info("    Writing file: '{0}'".format(file_name))
1027 with open(file_name, "w") as file_handler:
1028 file_handler.write(header_str)
1029 for test in tbl_sorted:
1030 file_handler.write(",".join([str(item) for item in test]) + '\n')
1032 txt_file_name = "{0}.txt".format(table["output-file"])
1033 logging.info("    Writing file: '{0}'".format(txt_file_name))
1034 convert_csv_to_pretty_txt(file_name, txt_file_name)
def _generate_url(base, testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is assembled as:
    base + file_name + '-' + testbed + '-' + nic + framesize + feature
    (with "-int"/"-tnl" stripped from the path part) + anchor + feature.

    :param base: The base part of URL common to all test cases.
    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type base: str
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """
    # NOTE(review): this excerpt is incomplete -- the initialisation of
    # url / file_name / nic / framesize / feature / anchor and the bodies
    # of several branches below are not visible here.  Confirm against the
    # full source file before relying on this listing.

    # Select the trending page (file_name) from the feature area encoded
    # in the test name; first match wins.
    if "lbdpdk" in test_name or "lbvpp" in test_name:
        file_name = "link_bonding"

    elif "114b" in test_name and "vhost" in test_name:  # NOTE(review): body missing in excerpt

    elif "testpmd" in test_name or "l3fwd" in test_name:  # NOTE(review): body missing in excerpt

    elif "memif" in test_name:
        file_name = "container_memif"

    elif "srv6" in test_name:  # NOTE(review): body missing in excerpt

    elif "vhost" in test_name:
        if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
            file_name = "vm_vhost_l2"
            # Feature suffix depends on topology and NIC.
            if "114b" in test_name:  # NOTE(review): body missing in excerpt
            elif "l2xcbase" in test_name and "x520" in test_name:
                feature = "-base-l2xc"
            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
                feature = "-base-l2bd"
        elif "ip4base" in test_name:
            file_name = "vm_vhost_ip4"

    elif "ipsecbasetnlsw" in test_name:
        file_name = "ipsecsw"
        feature = "-base-scale"

    elif "ipsec" in test_name:
        feature = "-base-scale"
        # HW-accelerated vs SW crypto tests live on different pages.
        if "hw-" in test_name:
            file_name = "ipsechw"
        elif "sw-" in test_name:
            file_name = "ipsecsw"

    elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
        file_name = "ip4_tunnels"

    elif "ip4base" in test_name or "ip4scale" in test_name:
        if "xl710" in test_name:
            feature = "-base-scale-features"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
        elif "snat" in test_name or "cop" in test_name:
            feature = "-features"
            # NOTE(review): the next assignment is most likely the body of
            # a trailing `else:` whose line is missing from this excerpt.
            feature = "-base-scale"

    elif "ip6base" in test_name or "ip6scale" in test_name:
        feature = "-base-scale"

    elif "l2xcbase" in test_name or "l2xcscale" in test_name \
            or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
            or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
        if "macip" in test_name:
            feature = "-features-macip"
        elif "iacl" in test_name:
            feature = "-features-iacl"
        elif "oacl" in test_name:
            feature = "-features-oacl"
            # NOTE(review): likely the body of a missing `else:` branch.
            feature = "-base-scale"

    # Derive the NIC token for the URL.
    # NOTE(review): the per-branch assignments are missing in this excerpt.
    if "x520" in test_name:  # NOTE(review): body missing in excerpt
    elif "x710" in test_name:  # NOTE(review): body missing in excerpt
    elif "xl710" in test_name:  # NOTE(review): body missing in excerpt
    elif "xxv710" in test_name:  # NOTE(review): body missing in excerpt
    elif "vic1227" in test_name:  # NOTE(review): body missing in excerpt
    elif "vic1385" in test_name:  # NOTE(review): body missing in excerpt

    # Derive the frame-size token.
    # NOTE(review): the per-branch assignments are missing in this excerpt.
    if "64b" in test_name:  # NOTE(review): body missing in excerpt
    elif "78b" in test_name:  # NOTE(review): body missing in excerpt
    elif "imix" in test_name:  # NOTE(review): body missing in excerpt
    elif "9000b" in test_name:  # NOTE(review): body missing in excerpt
    elif "1518b" in test_name:  # NOTE(review): body missing in excerpt
    elif "114b" in test_name:  # NOTE(review): body missing in excerpt
    anchor += framesize + '-'

    # Derive the thread/core part of the anchor.
    # NOTE(review): the per-branch assignments are missing in this excerpt.
    if "1t1c" in test_name:  # NOTE(review): body missing in excerpt
    elif "2t2c" in test_name:  # NOTE(review): body missing in excerpt
    elif "4t4c" in test_name:  # NOTE(review): body missing in excerpt
    elif "2t1c" in test_name:  # NOTE(review): body missing in excerpt
    elif "4t2c" in test_name:  # NOTE(review): body missing in excerpt
    elif "8t4c" in test_name:  # NOTE(review): body missing in excerpt

    # "-int"/"-tnl" qualifiers are kept in the trailing anchor `feature`
    # but stripped from the path component of the URL.
    return url + file_name + '-' + testbed + '-' + nic + framesize + \
        feature.replace("-int", "").replace("-tnl", "") + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, renders it as an HTML table
    (rows coloured by anomaly classification) and writes it wrapped in an
    rST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is incomplete -- guard conditions,
    # try/except framing and the statements filling in the cell text are
    # not visible here.  Confirm against the full source file.

    testbed = table.get("testbed", None)
    # NOTE(review): guard (likely `if testbed is None:` followed by a
    # `return`) is missing from this excerpt.
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the input CSV produced by the dashboard algorithm.
    # NOTE(review): enclosing `try:` line missing in excerpt.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    # NOTE(review): an `except` clause line is missing before the next
    # statement in this excerpt.
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Build the HTML table: full width, borderless.
    dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row: first CSV row on a highlighted background.
    tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Data rows: background colour reflects the classification; the two
    # shades of each colour alternate between even and odd rows.
    colors = {"regression": ("#ffcccc", "#ff9999"),
              "progression": ("#c6ecc6", "#9fdf9f"),
              "normal": ("#e9f1fb", "#d4e4f7")}
    for r_idx, row in enumerate(csv_lst[1:]):
        # NOTE(review): the conditions selecting the colour (and the
        # default "normal" assignment) are missing in this excerpt.
            color = "regression"
            color = "progression"
        background = colors[color][r_idx % 2]
        tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))

        # Cells; the test-name cell is wrapped in a link to its plot.
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # NOTE(review): the condition restricting the link to the
            # first column is missing in this excerpt.
            url = _generate_url("../trending/", testbed, item)
            ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # Serialise the table wrapped in an rST raw-html directive.
    with open(table["output-file"], 'w') as html_file:
        logging.info("    Writing file: '{0}'".format(table["output-file"]))
        html_file.write(".. raw:: html\n\n\t")
        html_file.write(ET.tostring(dashboard))
        html_file.write("\n\t<p><br><br></p>\n")
    # NOTE(review): alternative branch (output file undefined); its guard
    # line is missing in this excerpt.
        logging.warning("The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    Writes, per build, the build number, the VPP version and the names of
    the tests that FAILed, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is incomplete -- the `tbl_list`
    # initialisation, the try/except around the metadata lookup and the
    # `return`/`continue` statements are not visible here.  Confirm
    # against the full source file.

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        # NOTE(review): `logging.warn` is a deprecated alias of
        # `logging.warning` -- consider updating in the full source.
        logging.warn("  No data for the {0} '{1}'.".
                     format(table.get("type", ""), table.get("title", "")))
        # NOTE(review): an early `return` is presumably missing here.

    for job, builds in table["data"].items():
        for build in builds:
            # NOTE(review): enclosing `try:` line missing in excerpt.
            version = input_data.metadata(job, build).get("version", "")
            # NOTE(review): `except` clause line missing in excerpt.
                logging.error("Data for {job}: {build} is not present.".
                              format(job=job, build=build))
            # Record the build id and version ahead of its failed tests.
            tbl_list.append(build)
            tbl_list.append(version)
            for tst_name, tst_data in data[job][build].iteritems():
                # Only FAILed tests are listed.
                if tst_data["status"] != "FAIL":
                    # NOTE(review): a `continue` is presumably missing here.
                groups = re.search(REGEX_NIC, tst_data["parent"])
                # NOTE(review): a guard for `groups is None` appears to be
                # missing in this excerpt.
                nic = groups.group(0)
                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + '\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects, over a sliding time window, how many times each test FAILed
    and when/on which build it last failed, then writes a CSV (and a
    pretty-printed TXT copy) sorted by the number of failures.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is incomplete -- accumulator
    # initialisations (tbl_dict, tbl_lst, tbl_sorted, max_fails, now),
    # the `try:` line and several tuple elements are not visible here.
    # Confirm against the full source file.

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    logging.info("  Creating the data set for the {0} '{1}'.".
                 format(table.get("type", ""), table.get("title", "")))
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = ["Test Case",
              "Last Failure [Time]",
              "Last Failure [VPP-Build-Id]",
              "Last Failure [CSIT-Job-Build-Id]"]
    # NOTE(review): a "Failures [#]" column header appears to be missing
    # from this excerpt (the code below sorts on a failure count).

    # Generate the data for the table according to the model in the table
    # Only results newer than this window (default 7 days) are counted.
    timeperiod = timedelta(int(table.get("window", 7)))

    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].iteritems():
                # Skip explicitly ignored tests.
                if tst_name.lower() in table.get("ignore-list", list()):
                    # NOTE(review): a `continue` is presumably missing here.
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    # NOTE(review): a guard for `groups is None` appears
                    # to be missing in this excerpt.
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        "name": "{0}-{1}".format(nic, tst_data["name"]),
                        "data": OrderedDict()}
                # NOTE(review): enclosing `try:` line missing in excerpt.
                generated = input_data.metadata(job, build).\
                    get("generated", "")
                then = dt.strptime(generated, "%Y%m%d %H:%M")
                # Count only results inside the time window.
                if (now - then) <= timeperiod:
                    tbl_dict[tst_name]["data"][build] = (
                        # NOTE(review): leading tuple elements (status,
                        # timestamp, ...) missing in this excerpt.
                        input_data.metadata(job, build).get("version", ""),
                except (TypeError, KeyError) as err:
                    logging.warning("tst_name: {} - err: {}".
                                    format(tst_name, repr(err)))

    # Aggregate per-test failure counts and last-failure details.
    for tst_data in tbl_dict.values():
        # NOTE(review): `fails_nr = 0` initialisation missing in excerpt.
        for val in tst_data["data"].values():
            if val[0] == "FAIL":
                # NOTE(review): `fails_nr += 1` presumably missing here.
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        # Track the maximum failure count for the bucket sort below.
        max_fails = fails_nr if fails_nr > max_fails else max_fails
        tbl_lst.append([tst_data["name"],
                        # NOTE(review): middle row elements missing in
                        # this excerpt.
                        "mrr-daily-build-{0}".format(fails_last_csit)])

    # Sort by last-failure time, then bucket by failure count descending.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)

    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
    logging.info("    Writing file: '{0}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_sorted:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    # Companion human-readable copy of the CSV.
    txt_file_name = "{0}.txt".format(table["output-file"])
    logging.info("    Writing file: '{0}'".format(txt_file_name))
    convert_csv_to_pretty_txt(file_name, txt_file_name)
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Renders the failed-tests CSV as an HTML table with alternating row
    shades and links from test names to their trending plots, wrapped in
    an rST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is incomplete -- guard conditions,
    # try/except framing and the statements filling in the cell text are
    # not visible here.  Confirm against the full source file.

    testbed = table.get("testbed", None)
    # NOTE(review): guard (likely `if testbed is None:` followed by a
    # `return`) is missing from this excerpt.
        logging.error("The testbed is not defined for the table '{0}'.".
                      format(table.get("title", "")))

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Read the input CSV produced by table_failed_tests.
    # NOTE(review): enclosing `try:` line missing in excerpt.
        with open(table["input-file"], 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            csv_lst = [item for item in csv_content]
    # NOTE(review): an `except` clause line is missing before the next
    # statement in this excerpt.
        logging.warning("The input file is not defined.")
    except csv.Error as err:
        logging.warning("Not possible to process the file '{0}'.\n{1}".
                        format(table["input-file"], err))

    # Build the HTML table: full width, borderless.
    failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))

    # Header row: first CSV row on a highlighted background.
    tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = "left" if idx == 0 else "center"
        th = ET.SubElement(tr, "th", attrib=dict(align=alignment))

    # Data rows: two shades alternating between even and odd rows.
    colors = ("#e9f1fb", "#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))

        # Cells; the test-name cell is wrapped in a link to its plot.
        for c_idx, item in enumerate(row):
            alignment = "left" if c_idx == 0 else "center"
            td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
            # NOTE(review): the condition restricting the link to the
            # first column is missing in this excerpt.
            url = _generate_url("../trending/", testbed, item)
            ref = ET.SubElement(td, "a", attrib=dict(href=url))

    # Serialise the table wrapped in an rST raw-html directive.
    with open(table["output-file"], 'w') as html_file:
        logging.info("    Writing file: '{0}'".format(table["output-file"]))
        html_file.write(".. raw:: html\n\n\t")
        html_file.write(ET.tostring(failed_tests))
        html_file.write("\n\t<p><br><br></p>\n")
    # NOTE(review): alternative branch (output file undefined); its guard
    # line is missing in this excerpt.
        logging.warning("The output file is not defined.")