X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_tables.py;h=a5a8824dc91ea6e5d8470e46671a8c9cc4934931;hp=6c301878ce4e5cb04e581f059ebe677deecc1009;hb=29035746be0145db942832b55555b695da2323d7;hpb=4a40c75ffc9f6b62a2eb58007675ef17f4c32b1e diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py index 6c301878ce..a5a8824dc9 100644 --- a/resources/tools/presentation/generator_tables.py +++ b/resources/tools/presentation/generator_tables.py @@ -18,7 +18,6 @@ import logging import csv import prettytable -import numpy as np import pandas as pd from string import replace @@ -441,7 +440,7 @@ def table_performance_comparison(table, input_data): table["output-file-ext"]) ] for file_name in tbl_names: - logging.info(" Writing file: '{}'".format(file_name)) + logging.info(" Writing file: '{0}'".format(file_name)) with open(file_name, "w") as file_handler: file_handler.write(header_str) for test in tbl_lst: @@ -462,7 +461,7 @@ def table_performance_comparison(table, input_data): for i, txt_name in enumerate(tbl_names_txt): txt_table = None - logging.info(" Writing file: '{}'".format(txt_name)) + logging.info(" Writing file: '{0}'".format(txt_name)) with open(tbl_names[i], 'rb') as csv_file: csv_content = csv.reader(csv_file, delimiter=',', quotechar='"') for row in csv_content: @@ -484,7 +483,7 @@ def table_performance_comparison(table, input_data): output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"], table["output-file-ext"]) - logging.info(" Writing file: '{}'".format(output_file)) + logging.info(" Writing file: '{0}'".format(output_file)) with open(output_file, "w") as out_file: out_file.write(header_str) for i, line in enumerate(lines[1:]): @@ -494,7 +493,7 @@ def table_performance_comparison(table, input_data): output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"], table["output-file-ext"]) - logging.info(" Writing file: '{}'".format(output_file)) + logging.info(" Writing file: '{0}'".format(output_file)) with open(output_file, "w") as out_file: out_file.write(header_str) for i, line in enumerate(lines[-1:0:-1]): @@ -511,7 +510,7 @@ def table_performance_comparison(table, input_data): output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"], table["output-file-ext"]) - logging.info(" Writing file: '{}'".format(output_file)) + logging.info(" Writing file: '{0}'".format(output_file)) with open(output_file, "w") as out_file: out_file.write(header_str) for i, line in enumerate(lines[1:]): @@ -521,7 +520,7 @@ def table_performance_comparison(table, input_data): output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"], table["output-file-ext"]) - logging.info(" Writing file: '{}'".format(output_file)) + logging.info(" Writing file: '{0}'".format(output_file)) with open(output_file, "w") as out_file: out_file.write(header_str) for i, line in enumerate(lines[-1:0:-1]): @@ -549,9 +548,9 @@ def table_performance_trending_dashboard(table, input_data): # Prepare the header of the tables header = ["Test case", "Thput trend [Mpps]", - "Change [Mpps]", + "Anomaly [Mpps]", "Change [%]", - "Anomaly"] + "Classification"] header_str = ",".join(header) + "\n" # Prepare data to the table: @@ -567,8 +566,8 @@ def table_performance_trending_dashboard(table, input_data): "data": list()} try: tbl_dict[tst_name]["data"]. 
\ - append(tst_data["throughput"]["value"]) - except TypeError: + append(tst_data["result"]["throughput"]) + except (TypeError, KeyError): pass # No data in output.xml for this test tbl_lst = list() @@ -580,45 +579,56 @@ def table_performance_trending_dashboard(table, input_data): # Test name: name = tbl_dict[tst_name]["name"] # Throughput trend: - trend = list(pd_data.rolling(window=win_size).median())[-2] + trend = list(pd_data.rolling(window=win_size, min_periods=2). + median())[-2] # Anomaly: t_data, _ = find_outliers(pd_data) last = list(t_data)[-1] t_stdev = list(t_data.rolling(window=win_size, min_periods=2). std())[-2] if isnan(last): - anomaly = "outlier" + classification = "outlier" + last = list(pd_data)[-1] elif last < (trend - 3 * t_stdev): - anomaly = "regression" + classification = "regression" elif last > (trend + 3 * t_stdev): - anomaly = "progression" + classification = "progression" else: - anomaly = "normal" - # Change: - change = round(float(last - trend) / 1000000, 2) - # Relative change: - rel_change = int(relative_change(float(trend), float(last))) - - tbl_lst.append([name, - round(float(last) / 1000000, 2), - change, - rel_change, - anomaly]) + classification = "normal" + + if not isnan(last) and not isnan(trend) and trend != 0: + # Change: + change = round(float(last - trend) / 1000000, 2) + # Relative change: + rel_change = int(relative_change(float(trend), float(last))) + + tbl_lst.append([name, + round(float(trend) / 1000000, 2), + last, + rel_change, + classification]) # Sort the table according to the relative change - tbl_lst.sort(key=lambda rel: rel[-1], reverse=True) + # tbl_lst.sort(key=lambda rel: rel[-2], reverse=True) + + # Sort the table according to the classification + tbl_sorted = list() + for classification in ("regression", "outlier", "progression", "normal"): + tbl_tmp = [item for item in tbl_lst if item[4] == classification] + tbl_tmp.sort(key=lambda rel: rel[0]) + tbl_sorted.extend(tbl_tmp) - file_name = "{}.{}".format(table["output-file"], table["output-file-ext"]) + file_name = "{0}.{1}".format(table["output-file"], table["output-file-ext"]) - logging.info(" Writing file: '{}'".format(file_name)) + logging.info(" Writing file: '{0}'".format(file_name)) with open(file_name, "w") as file_handler: file_handler.write(header_str) - for test in tbl_lst: + for test in tbl_sorted: file_handler.write(",".join([str(item) for item in test]) + '\n') - txt_file_name = "{}.txt".format(table["output-file"]) + txt_file_name = "{0}.txt".format(table["output-file"]) txt_table = None - logging.info(" Writing file: '{}'".format(txt_file_name)) + logging.info(" Writing file: '{0}'".format(txt_file_name)) with open(file_name, 'rb') as csv_file: csv_content = csv.reader(csv_file, delimiter=',', quotechar='"') for row in csv_content:
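
Below is a minimal, self-contained sketch of the trend/classification scheme this patch introduces in table_performance_trending_dashboard(): the trend is a rolling median (min_periods=2), the spread a rolling standard deviation, and the newest sample is classified as regression/progression when it falls outside a +/- 3 * stdev band around the trend. It is illustrative only, not the repository's code: classify_last_sample, the default win_size=10 and the sample data are assumed names/values, and the repository's find_outliers() helper (which NaN-s outliers before classification) is approximated here by folding all NaN cases into the "outlier" label.

    from math import isnan

    import pandas as pd


    def classify_last_sample(samples, win_size=10):
        """Classify the newest throughput sample against the rolling trend.

        :param samples: Chronological throughput samples [pps]; at least two.
        :param win_size: Rolling window size for median/stdev (assumed here).
        :returns: Tuple (trend [pps], classification string).
        """
        pd_data = pd.Series(samples)

        # Trend and spread are the rolling median / stdev evaluated one
        # sample before the newest one; min_periods=2 lets short histories
        # still produce a value instead of NaN.
        trend = pd_data.rolling(window=win_size,
                                min_periods=2).median().iloc[-2]
        t_stdev = pd_data.rolling(window=win_size,
                                  min_periods=2).std().iloc[-2]
        last = pd_data.iloc[-1]

        # The patched code labels a sample NaN-ed by find_outliers() as
        # "outlier"; this sketch folds all NaN cases into that label.
        if isnan(last) or isnan(trend) or isnan(t_stdev):
            return trend, "outlier"
        if last < trend - 3 * t_stdev:
            return trend, "regression"
        if last > trend + 3 * t_stdev:
            return trend, "progression"
        return trend, "normal"


    if __name__ == "__main__":
        # Purely illustrative data: a stable ~4.1 Mpps history, then a drop.
        history = [4.1e6, 4.0e6, 4.2e6, 4.1e6, 4.0e6, 4.1e6, 3.2e6]
        trend, verdict = classify_last_sample(history)
        print("trend: {0:.2f} Mpps, classification: {1}".format(
            trend / 1e6, verdict))

The +/- 3 * t_stdev band mirrors the "trend - 3 * t_stdev" / "trend + 3 * t_stdev" checks added by the patch; the dashboard then orders rows by classification ("regression", "outlier", "progression", "normal") instead of by relative change, which is why the old sort call is commented out above.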