X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;ds=sidebyside;f=resources%2Ftools%2Fpresentation%2Fgenerator_tables.py;h=c41c6de00419fc9a9cf76cbd1c376677bc0f9674;hb=9dad3f95c2624808aeb9892049c236fa788a55d9;hp=9b9f09f4be2f4c4335fef44039297cde3829a532;hpb=4f5872c1bb23873b3a93cb471aae8700d5ca029d;p=csit.git diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py index 9b9f09f4be..c41c6de004 100644 --- a/resources/tools/presentation/generator_tables.py +++ b/resources/tools/presentation/generator_tables.py @@ -405,18 +405,24 @@ def table_performance_comparison(table, input_data): item = [tbl_dict[tst_name]["name"], ] if tbl_dict[tst_name]["ref-data"]: data_t = remove_outliers(tbl_dict[tst_name]["ref-data"], - outlier_constant=table["outlier-const"]) + outlier_const=table["outlier-const"]) # TODO: Specify window size. - item.append(round(mean(data_t) / 1000000, 2)) - item.append(round(stdev(data_t) / 1000000, 2)) + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) else: item.extend([None, None]) if tbl_dict[tst_name]["cmp-data"]: data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"], - outlier_constant=table["outlier-const"]) + outlier_const=table["outlier-const"]) # TODO: Specify window size. - item.append(round(mean(data_t) / 1000000, 2)) - item.append(round(stdev(data_t) / 1000000, 2)) + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) else: item.extend([None, None]) if item[1] is not None and item[3] is not None: @@ -598,16 +604,22 @@ def table_performance_comparison_mrr(table, input_data): data_t = remove_outliers(tbl_dict[tst_name]["ref-data"], outlier_const=table["outlier-const"]) # TODO: Specify window size. - item.append(round(mean(data_t) / 1000000, 2)) - item.append(round(stdev(data_t) / 1000000, 2)) + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) else: item.extend([None, None]) if tbl_dict[tst_name]["cmp-data"]: data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"], outlier_const=table["outlier-const"]) # TODO: Specify window size. 
- item.append(round(mean(data_t) / 1000000, 2)) - item.append(round(stdev(data_t) / 1000000, 2)) + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) else: item.extend([None, None]) if item[1] is not None and item[3] is not None and item[1] != 0: @@ -677,6 +689,7 @@ def table_performance_trending_dashboard(table, input_data): # Prepare the header of the tables header = ["Test Case", "Throughput Trend [Mpps]", + "Long Trend Compliance", "Trend Compliance", "Top Anomaly [Mpps]", "Change [%]", @@ -706,12 +719,18 @@ def table_performance_trending_dashboard(table, input_data): if len(tbl_dict[tst_name]["data"]) > 2: pd_data = pd.Series(tbl_dict[tst_name]["data"]) - win_size = pd_data.size \ - if pd_data.size < table["window"] else table["window"] + win_size = min(pd_data.size, table["window"]) # Test name: name = tbl_dict[tst_name]["name"] median = pd_data.rolling(window=win_size, min_periods=2).median() + median_idx = pd_data.size - table["long-trend-window"] + median_idx = 0 if median_idx < 0 else median_idx + try: + max_median = max([x for x in median.values[median_idx:] + if not isnan(x)]) + except ValueError: + max_median = None trimmed_data, _ = split_outliers(pd_data, outlier_const=1.5, window=win_size) stdev_t = pd_data.rolling(window=win_size, min_periods=2).std() @@ -781,68 +800,73 @@ def table_performance_trending_dashboard(table, input_data): else: tmp_classification = "outlier" if classification == "failure" \ else classification + index = None for idx in range(first_idx, len(classification_lst)): if classification_lst[idx] == tmp_classification: - index = idx - break + if rel_change_lst[idx]: + index = idx + break + if index is None: + continue for idx in range(index+1, len(classification_lst)): if classification_lst[idx] == tmp_classification: - if rel_change_lst[idx] > rel_change_lst[index]: - index = idx - - # if "regression" in classification_lst[first_idx:]: - # classification = "regression" - # elif "outlier" in classification_lst[first_idx:]: - # classification = "outlier" - # elif "progression" in classification_lst[first_idx:]: - # classification = "progression" - # elif "normal" in classification_lst[first_idx:]: - # classification = "normal" - # else: - # classification = None - # - # nr_outliers = 0 - # consecutive_outliers = 0 - # failure = False - # for item in classification_lst[first_idx:]: - # if item == "outlier": - # nr_outliers += 1 - # consecutive_outliers += 1 - # if consecutive_outliers == 3: - # failure = True - # else: - # consecutive_outliers = 0 - # - # idx = len(classification_lst) - 1 - # while idx: - # if classification_lst[idx] == classification: - # break - # idx -= 1 - # - # if failure: - # classification = "failure" - # elif classification == "outlier": - # classification = "normal" - - trend = round(float(median_lst[-1]) / 1000000, 2) \ - if not isnan(median_lst[-1]) else '' - sample = round(float(sample_lst[index]) / 1000000, 2) \ - if not isnan(sample_lst[index]) else '' - rel_change = rel_change_lst[index] \ - if rel_change_lst[index] is not None else '' - tbl_lst.append([name, - trend, - classification, - '-' if classification == "normal" else sample, - '-' if classification == "normal" else rel_change, - nr_outliers]) + if rel_change_lst[idx]: + if (abs(rel_change_lst[idx]) > + abs(rel_change_lst[index])): + index = idx + + logging.debug("{}".format(name)) + logging.debug("sample_lst: {} - {}". 
+ format(len(sample_lst), sample_lst)) + logging.debug("median_lst: {} - {}". + format(len(median_lst), median_lst)) + logging.debug("rel_change: {} - {}". + format(len(rel_change_lst), rel_change_lst)) + logging.debug("classn_lst: {} - {}". + format(len(classification_lst), classification_lst)) + logging.debug("index: {}".format(index)) + logging.debug("classifica: {}".format(classification)) + + try: + trend = round(float(median_lst[-1]) / 1000000, 2) \ + if not isnan(median_lst[-1]) else '-' + sample = round(float(sample_lst[index]) / 1000000, 2) \ + if not isnan(sample_lst[index]) else '-' + rel_change = rel_change_lst[index] \ + if rel_change_lst[index] is not None else '-' + if max_median is not None: + if not isnan(sample_lst[index]): + long_trend_threshold = \ + max_median * (table["long-trend-threshold"] / 100) + if sample_lst[index] < long_trend_threshold: + long_trend_classification = "failure" + else: + long_trend_classification = 'normal' + else: + long_trend_classification = "failure" + else: + long_trend_classification = '-' + tbl_lst.append([name, + trend, + long_trend_classification, + classification, + '-' if classification == "normal" else sample, + '-' if classification == "normal" else + rel_change, + nr_outliers]) + except IndexError as err: + logging.error("{}".format(err)) + continue # Sort the table according to the classification tbl_sorted = list() - for classification in ("failure", "regression", "progression", "normal"): - tbl_tmp = [item for item in tbl_lst if item[2] == classification] - tbl_tmp.sort(key=lambda rel: rel[0]) - tbl_sorted.extend(tbl_tmp) + for long_trend_class in ("failure", 'normal', '-'): + tbl_long = [item for item in tbl_lst if item[2] == long_trend_class] + for classification in \ + ("failure", "regression", "progression", "normal"): + tbl_tmp = [item for item in tbl_long if item[3] == classification] + tbl_tmp.sort(key=lambda rel: rel[0]) + tbl_sorted.extend(tbl_tmp) file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"]) @@ -978,7 +1002,7 @@ def table_performance_trending_dashboard_html(table, input_data): ref = ET.SubElement(td, "a", attrib=dict(href=url)) ref.text = item - if c_idx == 2: + if c_idx == 3: if item == "regression": td.set("bgcolor", "#eca1a6") elif item == "failure":
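
The first two hunks fix the keyword argument name (outlier_const) and stop calling mean()/stdev() when outlier removal leaves no samples, extending the row with [None, None] instead. A minimal sketch of that guard, assuming Python's statistics module stands in for the project's own mean/stdev helpers; mean_stdev_mpps is an illustrative name, not CSIT code:

from statistics import mean, stdev

def mean_stdev_mpps(data_t):
    """Return (mean, stdev) rounded to Mpps for an already outlier-filtered
    sample list, or (None, None) when filtering left nothing - the guard the
    patch adds around the item.append(...) calls."""
    if not data_t:
        # Outlier removal can empty the sample set; report missing cells
        # instead of letting mean()/stdev() raise on an empty sequence.
        return None, None
    if len(data_t) < 2:
        # statistics.stdev() needs at least two points; this extra guard is
        # only for the sketch, it is not part of the patch.
        return round(data_t[0] / 1000000, 2), 0.0
    return round(mean(data_t) / 1000000, 2), round(stdev(data_t) / 1000000, 2)

With such a helper the call site collapses to item.extend(mean_stdev_mpps(remove_outliers(data, outlier_const=table["outlier-const"]))): a populated sample list yields the two Mpps cells, an empty one yields the same [None, None] the new else branches produce.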
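
The dashboard hunks add a "Long Trend Compliance" column: the highest rolling median within the last table["long-trend-window"] samples serves as a baseline, and the selected sample is marked "failure" when it falls below table["long-trend-threshold"] percent of that baseline. A sketch of the classification, with made-up defaults standing in for the configuration values (long_trend_compliance is a hypothetical helper, not a function in generator_tables.py):

from math import isnan

import pandas as pd

def long_trend_compliance(data, sample, window=30, long_window=180,
                          threshold_pct=75.0):
    """Return 'failure', 'normal' or '-' for the new dashboard column:
    'failure' when `sample` drops below threshold_pct percent of the best
    rolling median seen over the last `long_window` samples."""
    pd_data = pd.Series(data)
    if pd_data.size < 3:
        # The dashboard only evaluates tests with more than two samples.
        return '-'
    win_size = min(pd_data.size, window)
    median = pd_data.rolling(window=win_size, min_periods=2).median()
    start = max(pd_data.size - long_window, 0)
    valid = [x for x in median.values[start:] if not isnan(x)]
    if not valid:
        return '-'
    max_median = max(valid)
    if sample is None or isnan(sample):
        # A missing sample is treated as non-compliant, as in the patch.
        return "failure"
    # Float division, in case the threshold is given as an integer percentage.
    return "failure" if sample < max_median * (threshold_pct / 100.0) \
        else "normal"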
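
The rewritten index search now skips samples whose relative change is missing or zero and picks, among the remaining samples with the wanted classification, the one whose relative change is largest in absolute value. A compact equivalent of the two loops (top_anomaly_index is an illustrative name only):

def top_anomaly_index(classification_lst, rel_change_lst, first_idx, wanted):
    """Return the index of the 'Top Anomaly': the entry classified as
    `wanted` with the largest absolute relative change, or None when no such
    entry has a usable change value."""
    candidates = [idx for idx in range(first_idx, len(classification_lst))
                  if classification_lst[idx] == wanted and rel_change_lst[idx]]
    if not candidates:
        return None
    # max() keeps the earliest index on ties, matching the loop version.
    return max(candidates, key=lambda idx: abs(rel_change_lst[idx]))

Returning None mirrors the new "if index is None: continue", which drops tests for which no usable anomaly was found.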
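
The final table is now sorted on two levels: by the new long-trend class first, then by the short-trend classification, alphabetically by test name within each group. A single sorted() call with a composite key gives the same order, assuming the row layout built above (name, trend, long-trend class, classification, ...); unlike the nested loops, rows with an unlisted classification end up at the bottom here instead of being dropped:

def sort_dashboard_rows(tbl_lst):
    """Order rows by long-trend class, then short-trend classification,
    then test name."""
    long_order = {"failure": 0, "normal": 1, "-": 2}
    short_order = {"failure": 0, "regression": 1, "progression": 2,
                   "normal": 3}
    return sorted(tbl_lst,
                  key=lambda row: (long_order.get(row[2], len(long_order)),
                                   short_order.get(row[3], len(short_order)),
                                   row[0]))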