X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_tables.py;h=6d81f43ba43bfb387f3f780b48ff1ad4239688d6;hb=refs%2Fchanges%2F01%2F13801%2F14;hp=6aa57db796dfa57743871ca036c359df2eb32aec;hpb=2d001ed910d3835848fccb7bb96a98a5270698fe;p=csit.git

diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 6aa57db796..6d81f43ba4 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -17,7 +17,6 @@
 
 import logging
 import csv
-import pandas as pd
 
 from string import replace
 from collections import OrderedDict
@@ -25,8 +24,8 @@ from numpy import nan, isnan
 from xml.etree import ElementTree as ET
 
 from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers,\
-    split_outliers, classify_anomalies, convert_csv_to_pretty_txt
+from utils import mean, stdev, relative_change, classify_anomalies, \
+    convert_csv_to_pretty_txt
 
 
 def generate_tables(spec, data):
@@ -185,6 +184,8 @@ def table_performance_improvements(table, input_data):
     """Generate the table(s) with algorithm: table_performance_improvements
     specified in the specification file.
 
+    # FIXME: Not used now.
+
     :param table: Table to generate.
     :param input_data: Data to process.
     :type table: pandas.Series
@@ -333,6 +334,8 @@ def table_performance_improvements(table, input_data):
     def _read_csv_template(file_name):
         """Read the template from a .csv file.
 
+        # FIXME: Not used now.
+
         :param file_name: Name / full path / relative path of the file to read.
         :type file_name: str
         :returns: Data from the template as list (lines) of lists (items on line).
@@ -372,18 +375,23 @@ def table_performance_comparison(table, input_data):
     try:
         header = ["Test case", ]
+        if table["include-tests"] == "MRR":
+            hdr_param = "Receive Rate"
+        else:
+            hdr_param = "Throughput"
+
         history = table.get("history", None)
         if history:
             for item in history:
                 header.extend(
-                    ["{0} Throughput [Mpps]".format(item["title"]),
+                    ["{0} {1} [Mpps]".format(item["title"], hdr_param),
                      "{0} Stdev [Mpps]".format(item["title"])])
         header.extend(
-            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
+            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
-             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
-             "Change [%]"])
+             "Delta [%]"])
         header_str = ",".join(header) + "\n"
     except (AttributeError, KeyError) as err:
         logging.error("The model is invalid, missing parameter: {0}".
                       format(err))
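Note: with the hdr_param switch above, MRR comparison tables get "Receive Rate" columns while NDR/PDR tables keep "Throughput". A minimal standalone sketch of the resulting CSV header follows; the run titles "1810"/"1901" and the stripped-down table spec are invented examples, not taken from the patch:

    # Illustrative only: the titles and "include-tests" value are made up.
    table = {"include-tests": "MRR",
             "reference": {"title": "1810"},
             "compare": {"title": "1901"}}
    hdr_param = "Receive Rate" if table["include-tests"] == "MRR" else "Throughput"
    header = ["Test case",
              "{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
              "{0} Stdev [Mpps]".format(table["reference"]["title"]),
              "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
              "{0} Stdev [Mpps]".format(table["compare"]["title"]),
              "Delta [%]"]
    print(",".join(header))
    # Test case,1810 Receive Rate [Mpps],1810 Stdev [Mpps],1901 Receive Rate [Mpps],1901 Stdev [Mpps],Delta [%]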
@@ -395,45 +403,116 @@ def table_performance_comparison(table, input_data):
     for job, builds in table["reference"]["data"].items():
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].iteritems():
-                if tbl_dict.get(tst_name, None) is None:
+                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
+                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
+                    replace("-ndrdisc", "").replace("-pdr", "").\
+                    replace("-ndr", "")
+                if tbl_dict.get(tst_name_mod, None) is None:
                     name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                             "-".join(tst_data["name"].
-                                                     split("-")[1:]))
-                    tbl_dict[tst_name] = {"name": name,
-                                          "ref-data": list(),
-                                          "cmp-data": list()}
+                                                     split("-")[:-1]))
+                    tbl_dict[tst_name_mod] = {"name": name,
+                                              "ref-data": list(),
+                                              "cmp-data": list()}
                 try:
-                    tbl_dict[tst_name]["ref-data"].\
-                        append(tst_data["throughput"]["value"])
+                    # TODO: Re-work when NDRPDRDISC tests are not used
+                    if table["include-tests"] == "MRR":
+                        tbl_dict[tst_name_mod]["ref-data"]. \
+                            append(tst_data["result"]["receive-rate"].avg)
+                    elif table["include-tests"] == "PDR":
+                        if tst_data["type"] == "PDR":
+                            tbl_dict[tst_name_mod]["ref-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["ref-data"].append(
+                                tst_data["throughput"]["PDR"]["LOWER"])
+                    elif table["include-tests"] == "NDR":
+                        if tst_data["type"] == "NDR":
+                            tbl_dict[tst_name_mod]["ref-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["ref-data"].append(
+                                tst_data["throughput"]["NDR"]["LOWER"])
+                    else:
+                        continue
                 except TypeError:
                     pass  # No data in output.xml for this test
 
     for job, builds in table["compare"]["data"].items():
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
+                    replace("-ndrpdr", "").replace("-pdrdisc", ""). \
+                    replace("-ndrdisc", "").replace("-pdr", ""). \
+                    replace("-ndr", "")
                 try:
-                    tbl_dict[tst_name]["cmp-data"].\
-                        append(tst_data["throughput"]["value"])
+                    # TODO: Re-work when NDRPDRDISC tests are not used
+                    if table["include-tests"] == "MRR":
+                        tbl_dict[tst_name_mod]["cmp-data"]. \
+                            append(tst_data["result"]["receive-rate"].avg)
+                    elif table["include-tests"] == "PDR":
+                        if tst_data["type"] == "PDR":
+                            tbl_dict[tst_name_mod]["cmp-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["cmp-data"].append(
+                                tst_data["throughput"]["PDR"]["LOWER"])
+                    elif table["include-tests"] == "NDR":
+                        if tst_data["type"] == "NDR":
+                            tbl_dict[tst_name_mod]["cmp-data"]. \
+                                append(tst_data["throughput"]["value"])
+                        elif tst_data["type"] == "NDRPDR":
+                            tbl_dict[tst_name_mod]["cmp-data"].append(
+                                tst_data["throughput"]["NDR"]["LOWER"])
+                    else:
+                        continue
                 except KeyError:
                     pass
                 except TypeError:
-                    tbl_dict.pop(tst_name, None)
+                    tbl_dict.pop(tst_name_mod, None)
 
     if history:
         for item in history:
             for job, builds in item["data"].items():
                 for build in builds:
                     for tst_name, tst_data in data[job][str(build)].iteritems():
-                        if tbl_dict.get(tst_name, None) is None:
+                        tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \
+                            replace("-ndrpdr", "").replace("-pdrdisc", ""). \
+                            replace("-ndrdisc", "").replace("-pdr", ""). \
+                            replace("-ndr", "")
+                        if tbl_dict.get(tst_name_mod, None) is None:
                             continue
-                        if tbl_dict[tst_name].get("history", None) is None:
-                            tbl_dict[tst_name]["history"] = OrderedDict()
-                        if tbl_dict[tst_name]["history"].get(item["title"],
-                                                             None) is None:
-                            tbl_dict[tst_name]["history"][item["title"]] = \
+                        if tbl_dict[tst_name_mod].get("history", None) is None:
+                            tbl_dict[tst_name_mod]["history"] = OrderedDict()
+                        if tbl_dict[tst_name_mod]["history"].get(item["title"],
+                                                                 None) is None:
+                            tbl_dict[tst_name_mod]["history"][item["title"]] = \
                                 list()
                         try:
-                            tbl_dict[tst_name]["history"][item["title"]].\
-                                append(tst_data["throughput"]["value"])
+                            # TODO: Re-work when NDRPDRDISC tests are not used
+                            if table["include-tests"] == "MRR":
+                                tbl_dict[tst_name_mod]["history"][item["title"
+                                ]].append(tst_data["result"]["receive-rate"].
+                                    avg)
+                            elif table["include-tests"] == "PDR":
+                                if tst_data["type"] == "PDR":
+                                    tbl_dict[tst_name_mod]["history"][
+                                        item["title"]].\
+                                        append(tst_data["throughput"]["value"])
+                                elif tst_data["type"] == "NDRPDR":
+                                    tbl_dict[tst_name_mod]["history"][item[
+                                        "title"]].append(tst_data["throughput"][
+                                        "PDR"]["LOWER"])
+                            elif table["include-tests"] == "NDR":
+                                if tst_data["type"] == "NDR":
+                                    tbl_dict[tst_name_mod]["history"][
+                                        item["title"]].\
+                                        append(tst_data["throughput"]["value"])
+                                elif tst_data["type"] == "NDRPDR":
+                                    tbl_dict[tst_name_mod]["history"][item[
+                                        "title"]].append(tst_data["throughput"][
+                                        "NDR"]["LOWER"])
+                            else:
+                                continue
                         except (TypeError, KeyError):
                             pass
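Note: the chained replace() calls above normalise test names so that runs executed under different test types (e.g. -ndrpdr vs -ndrdisc) land on the same table row. A small equivalent sketch; the helper name and the sample test name are invented:

    def strip_test_type(tst_name):
        # Same effect as the chained .replace() calls in the hunk above.
        # Longer suffixes come first so e.g. "-ndrpdr" is removed whole
        # rather than being mangled by "-ndr".
        for suffix in ("-ndrpdrdisc", "-ndrpdr", "-pdrdisc", "-ndrdisc",
                       "-pdr", "-ndr"):
            tst_name = tst_name.replace(suffix, "")
        return tst_name

    print(strip_test_type("10ge2p1x520-ethip4-ip4base-ndrpdr"))
    # 10ge2p1x520-ethip4-ip4base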
@@ -444,37 +523,22 @@ def table_performance_comparison(table, input_data):
         if tbl_dict[tst_name].get("history", None) is not None:
             for hist_data in tbl_dict[tst_name]["history"].values():
                 if hist_data:
-                    data_t = remove_outliers(
-                        hist_data, outlier_const=table["outlier-const"])
-                    if data_t:
-                        item.append(round(mean(data_t) / 1000000, 2))
-                        item.append(round(stdev(data_t) / 1000000, 2))
-                    else:
-                        item.extend([None, None])
+                    item.append(round(mean(hist_data) / 1000000, 2))
+                    item.append(round(stdev(hist_data) / 1000000, 2))
                 else:
                     item.extend([None, None])
         else:
             item.extend([None, None])
-        if tbl_dict[tst_name]["ref-data"]:
-            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
-                                     outlier_const=table["outlier-const"])
-            # TODO: Specify window size.
-            if data_t:
-                item.append(round(mean(data_t) / 1000000, 2))
-                item.append(round(stdev(data_t) / 1000000, 2))
-            else:
-                item.extend([None, None])
+        data_t = tbl_dict[tst_name]["ref-data"]
+        if data_t:
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
         else:
             item.extend([None, None])
-        if tbl_dict[tst_name]["cmp-data"]:
-            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
-                                     outlier_const=table["outlier-const"])
-            # TODO: Specify window size.
-            if data_t:
-                item.append(round(mean(data_t) / 1000000, 2))
-                item.append(round(stdev(data_t) / 1000000, 2))
-            else:
-                item.extend([None, None])
+        data_t = tbl_dict[tst_name]["cmp-data"]
+        if data_t:
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
         else:
             item.extend([None, None])
         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
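Note: with remove_outliers() gone, the per-row statistics above are taken over the raw samples and converted to Mpps. A self-contained sketch; the sample rates are invented, and a population standard deviation is used here as a stand-in for utils.stdev (an assumption, not confirmed by the patch):

    from math import sqrt

    samples = [12500000.0, 12700000.0, 12600000.0]  # packets per second
    avg = sum(samples) / len(samples)
    std = sqrt(sum((x - avg) ** 2 for x in samples) / len(samples))
    print([round(avg / 1000000, 2), round(std / 1000000, 2)])
    # [12.6, 0.08]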
@@ -485,227 +549,19 @@
     # Sort the table according to the relative change
     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
 
-    # Generate tables:
-    # All tests in csv:
-    tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-ndr-2t2c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-ndr-4t4c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-2t2c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"]),
-                 "{0}-pdr-4t4c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-                 ]
-    for file_name in tbl_names:
-        logging.info(" Writing file: '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(header_str)
-            for test in tbl_lst:
-                if (file_name.split("-")[-3] in test[0] and  # NDR vs PDR
-                        file_name.split("-")[-2] in test[0]):  # cores
-                    test[0] = "-".join(test[0].split("-")[:-1])
-                    file_handler.write(",".join([str(item) for item in test]) +
-                                       "\n")
-
-    # All tests in txt:
-    tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-ndr-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-ndr-4t4c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-pdr-4t4c-full.txt".format(table["output-file"])
-                     ]
-
-    for i, txt_name in enumerate(tbl_names_txt):
-        logging.info(" Writing file: '{0}'".format(txt_name))
-        convert_csv_to_pretty_txt(tbl_names[i], txt_name)
-
-    # Selected tests in csv:
-    input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    with open(input_file, "r") as in_file:
-        lines = list()
-        for line in in_file:
-            lines.append(line)
-
-    output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    logging.info(" Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[1:]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
-                                                  table["output-file-ext"])
-    logging.info(" Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[-1:0:-1]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
-    input_file = "{0}-pdr-1t1c-full{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    with open(input_file, "r") as in_file:
-        lines = list()
-        for line in in_file:
-            lines.append(line)
-
-    output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
-                                               table["output-file-ext"])
-    logging.info(" Writing file: '{0}'".format(output_file))
-    with open(output_file, "w") as out_file:
-        out_file.write(header_str)
-        for i, line in enumerate(lines[1:]):
-            if i == table["nr-of-tests-shown"]:
-                break
-            out_file.write(line)
-
"{0}-pdr-1t1c-bottom{1}".format(table["output-file"], - table["output-file-ext"]) - logging.info(" Writing file: '{0}'".format(output_file)) - with open(output_file, "w") as out_file: - out_file.write(header_str) - for i, line in enumerate(lines[-1:0:-1]): - if i == table["nr-of-tests-shown"]: - break - out_file.write(line) - - -def table_performance_comparison_mrr(table, input_data): - """Generate the table(s) with algorithm: table_performance_comparison_mrr - specified in the specification file. - - :param table: Table to generate. - :param input_data: Data to process. - :type table: pandas.Series - :type input_data: InputData - """ - - logging.info(" Generating the table {0} ...". - format(table.get("title", ""))) - - # Transform the data - logging.info(" Creating the data set for the {0} '{1}'.". - format(table.get("type", ""), table.get("title", ""))) - data = input_data.filter_data(table, continue_on_error=True) - - # Prepare the header of the tables - try: - header = ["Test case", - "{0} Throughput [Mpps]".format(table["reference"]["title"]), - "{0} stdev [Mpps]".format(table["reference"]["title"]), - "{0} Throughput [Mpps]".format(table["compare"]["title"]), - "{0} stdev [Mpps]".format(table["compare"]["title"]), - "Change [%]"] - header_str = ",".join(header) + "\n" - except (AttributeError, KeyError) as err: - logging.error("The model is invalid, missing parameter: {0}". - format(err)) - return - - # Prepare data to the table: - tbl_dict = dict() - for job, builds in table["reference"]["data"].items(): - for build in builds: - for tst_name, tst_data in data[job][str(build)].iteritems(): - if tbl_dict.get(tst_name, None) is None: - name = "{0}-{1}".format(tst_data["parent"].split("-")[0], - "-".join(tst_data["name"]. - split("-")[1:])) - tbl_dict[tst_name] = {"name": name, - "ref-data": list(), - "cmp-data": list()} - try: - tbl_dict[tst_name]["ref-data"].\ - append(tst_data["result"]["throughput"]) - except TypeError: - pass # No data in output.xml for this test - - for job, builds in table["compare"]["data"].items(): - for build in builds: - for tst_name, tst_data in data[job][str(build)].iteritems(): - try: - tbl_dict[tst_name]["cmp-data"].\ - append(tst_data["result"]["throughput"]) - except KeyError: - pass - except TypeError: - tbl_dict.pop(tst_name, None) - - tbl_lst = list() - for tst_name in tbl_dict.keys(): - item = [tbl_dict[tst_name]["name"], ] - if tbl_dict[tst_name]["ref-data"]: - data_t = remove_outliers(tbl_dict[tst_name]["ref-data"], - outlier_const=table["outlier-const"]) - # TODO: Specify window size. - if data_t: - item.append(round(mean(data_t) / 1000000, 2)) - item.append(round(stdev(data_t) / 1000000, 2)) - else: - item.extend([None, None]) - else: - item.extend([None, None]) - if tbl_dict[tst_name]["cmp-data"]: - data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"], - outlier_const=table["outlier-const"]) - # TODO: Specify window size. 
-            if data_t:
-                item.append(round(mean(data_t) / 1000000, 2))
-                item.append(round(stdev(data_t) / 1000000, 2))
-            else:
-                item.extend([None, None])
-        else:
-            item.extend([None, None])
-        if item[1] is not None and item[3] is not None and item[1] != 0:
-            item.append(int(relative_change(float(item[1]), float(item[3]))))
-        if len(item) == 6:
-            tbl_lst.append(item)
-
-    # Sort the table according to the relative change
-    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
-
-    # Generate tables:
-    # All tests in csv:
-    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"]),
-                 "{0}-2t2c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"]),
-                 "{0}-4t4c-full{1}".format(table["output-file"],
-                                           table["output-file-ext"])
-                 ]
-    for file_name in tbl_names:
-        logging.info(" Writing file: '{0}'".format(file_name))
-        with open(file_name, "w") as file_handler:
-            file_handler.write(header_str)
-            for test in tbl_lst:
-                if file_name.split("-")[-2] in test[0]:  # cores
-                    test[0] = "-".join(test[0].split("-")[:-1])
-                    file_handler.write(",".join([str(item) for item in test]) +
-                                       "\n")
-
-    # All tests in txt:
-    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
-                     "{0}-2t2c-full.txt".format(table["output-file"]),
-                     "{0}-4t4c-full.txt".format(table["output-file"])
-                     ]
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
 
-    for i, txt_name in enumerate(tbl_names_txt):
-        logging.info(" Writing file: '{0}'".format(txt_name))
-        convert_csv_to_pretty_txt(tbl_names[i], txt_name)
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
 
 
 def table_performance_trending_dashboard(table, input_data):
-    """Generate the table(s) with algorithm: table_performance_comparison
+    """Generate the table(s) with algorithm:
+    table_performance_trending_dashboard
     specified in the specification file.
 
     :param table: Table to generate.
@@ -728,8 +584,7 @@
              "Short-Term Change [%]",
              "Long-Term Change [%]",
              "Regressions [#]",
-             "Progressions [#]",
-             "Outliers [#]"
+             "Progressions [#]"
              ]
     header_str = ",".join(header) + "\n"
 
@@ -742,73 +597,58 @@
                     continue
             if tbl_dict.get(tst_name, None) is None:
                 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                        "-".join(tst_data["name"].
-                                                 split("-")[1:]))
+                                        tst_data["name"])
                 tbl_dict[tst_name] = {"name": name,
                                       "data": OrderedDict()}
             try:
-                tbl_dict[tst_name]["data"][str(build)] = \
-                    tst_data["result"]["throughput"]
+                tbl_dict[tst_name]["data"][str(build)] = \
+                    tst_data["result"]["receive-rate"]
             except (TypeError, KeyError):
                 pass  # No data in output.xml for this test
 
     tbl_lst = list()
     for tst_name in tbl_dict.keys():
-        if len(tbl_dict[tst_name]["data"]) < 3:
+        data_t = tbl_dict[tst_name]["data"]
+        if len(data_t) < 2:
             continue
 
-        pd_data = pd.Series(tbl_dict[tst_name]["data"])
-        data_t, _ = split_outliers(pd_data, outlier_const=1.5,
-                                   window=table["window"])
-        last_key = data_t.keys()[-1]
-        win_size = min(data_t.size, table["window"])
-        win_first_idx = data_t.size - win_size
-        key_14 = data_t.keys()[win_first_idx]
-        long_win_size = min(data_t.size, table["long-trend-window"])
-        median_t = data_t.rolling(window=win_size, min_periods=2).median()
-        median_first_idx = median_t.size - long_win_size
+        classification_lst, avgs = classify_anomalies(data_t)
+
+        win_size = min(len(data_t), table["window"])
+        long_win_size = min(len(data_t), table["long-trend-window"])
+
         try:
-            max_median = max(
-                [x for x in median_t.values[median_first_idx:-win_size]
+            max_long_avg = max(
+                [x for x in avgs[-long_win_size:-win_size]
                  if not isnan(x)])
         except ValueError:
-            max_median = nan
-        try:
-            last_median_t = median_t[last_key]
-        except KeyError:
-            last_median_t = nan
-        try:
-            median_t_14 = median_t[key_14]
-        except KeyError:
-            median_t_14 = nan
+            max_long_avg = nan
+        last_avg = avgs[-1]
+        avg_week_ago = avgs[max(-win_size, -len(avgs))]
 
-        if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
+        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
             rel_change_last = nan
         else:
             rel_change_last = round(
-                ((last_median_t - median_t_14) / median_t_14) * 100, 2)
+                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
 
-        if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
+        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
             rel_change_long = nan
         else:
             rel_change_long = round(
-                ((last_median_t - max_median) / max_median) * 100, 2)
-
-        # Classification list:
-        classification_lst = classify_anomalies(data_t, window=14)
+                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
 
         if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name]["name"],
-                '-' if isnan(last_median_t) else
-                round(last_median_t / 1000000, 2),
+                '-' if isnan(last_avg) else
+                round(last_avg / 1000000, 2),
                 '-' if isnan(rel_change_last) else rel_change_last,
                 '-' if isnan(rel_change_long) else rel_change_long,
-                classification_lst[win_first_idx:].count("regression"),
-                classification_lst[win_first_idx:].count("progression"),
-                classification_lst[win_first_idx:].count("outlier")])
+                classification_lst[-win_size:].count("regression"),
+                classification_lst[-win_size:].count("progression")])
 
     tbl_lst.sort(key=lambda rel: rel[0])
@@ -816,11 +656,9 @@
     for nrr in range(table["window"], -1, -1):
         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
         for nrp in range(table["window"], -1, -1):
-            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
-            for nro in range(table["window"], -1, -1):
-                tbl_out = [item for item in tbl_pro if item[6] == nro]
-                tbl_out.sort(key=lambda rel: rel[2])
-                tbl_sorted.extend(tbl_out)
+            tbl_out = [item for item in tbl_reg if item[5] == nrp]
+            tbl_out.sort(key=lambda rel: rel[2])
+            tbl_sorted.extend(tbl_out)
 
     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
 
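Note: the dashboard now derives both trend columns from the running averages returned by classify_anomalies(). A standalone sketch of the slicing above; the avgs list and window sizes are invented (in the patch they come from classify_anomalies() and the table spec):

    from numpy import nan, isnan

    avgs = [10.1, 10.2, 10.4, 10.3, 9.1, 9.0]
    win_size, long_win_size = 3, 6
    try:
        # Long-term reference: best average in the long window, short window excluded.
        max_long_avg = max(
            [x for x in avgs[-long_win_size:-win_size] if not isnan(x)])
    except ValueError:
        max_long_avg = nan
    last_avg = avgs[-1]
    avg_week_ago = avgs[max(-win_size, -len(avgs))]
    rel_change_last = round(((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
    rel_change_long = round(((last_avg - max_long_avg) / max_long_avg) * 100, 2)
    print("{0} {1}".format(rel_change_last, rel_change_long))
    # -12.62 -13.46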
@@ -958,15 +796,12 @@
     # Rows:
     colors = {"regression": ("#ffcccc", "#ff9999"),
               "progression": ("#c6ecc6", "#9fdf9f"),
-              "outlier": ("#e6e6e6", "#cccccc"),
               "normal": ("#e9f1fb", "#d4e4f7")}
     for r_idx, row in enumerate(csv_lst[1:]):
         if int(row[4]):
             color = "regression"
         elif int(row[5]):
             color = "progression"
-        elif int(row[6]):
-            color = "outlier"
         else:
             color = "normal"
         background = colors[color][r_idx % 2]
@@ -1014,10 +849,10 @@ def table_failed_tests(table, input_data):
 
     # Prepare the header of the tables
     header = ["Test Case",
-              "Fails [#]",
-              "Last Fail [Timestamp]",
-              "Last Fail [VPP Build]",
-              "Last Fail [CSIT Build]"]
+              "Failures [#]",
+              "Last Failure [Time]",
+              "Last Failure [VPP-Build-Id]",
+              "Last Failure [CSIT-Job-Build-Id]"]
 
     # Generate the data for the table according to the model in the table
     # specification
@@ -1030,8 +865,7 @@
                 continue
             if tbl_dict.get(tst_name, None) is None:
                 name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                        "-".join(tst_data["name"].
-                                                 split("-")[1:]))
+                                        tst_data["name"])
                 tbl_dict[tst_name] = {"name": name,
                                       "data": OrderedDict()}
             try:
@@ -1114,14 +948,9 @@ def table_failed_tests_html(table, input_data):
         th.text = item
 
     # Rows:
-    colors = {"very-bad": ("#ffcccc", "#ff9999"),
-              "bad": ("#e9f1fb", "#d4e4f7")}
+    colors = ("#e9f1fb", "#d4e4f7")
     for r_idx, row in enumerate(csv_lst[1:]):
-        if int(row[1]) > 7:
-            color = "very-bad"
-        else:
-            color = "bad"
-        background = colors[color][r_idx % 2]
+        background = colors[r_idx % 2]
         tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
         # Columns:
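Note: the last hunk drops severity-based colouring of failed-test rows; rows now simply alternate between two backgrounds. A minimal runnable sketch of the new row rendering; the table data is invented:

    from xml.etree import ElementTree as ET

    failed_tests = ET.Element("table")
    colors = ("#e9f1fb", "#d4e4f7")
    csv_lst = [["Test Case", "Failures [#]"],
               ["ip4base", "3"],
               ["l2xcbase", "1"]]
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]  # even rows light, odd rows darker
        tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
        for item in row:
            td = ET.SubElement(tr, "td", attrib=dict(align="center"))
            td.text = item
    print(ET.tostring(failed_tests))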