- classification = None
-
- nr_outliers = 0
- consecutive_outliers = 0
- failure = False
- for item in classification_lst[first_idx:]:
- if item == "outlier":
- nr_outliers += 1
- consecutive_outliers += 1
- if consecutive_outliers == 3:
- failure = True
- else:
- consecutive_outliers = 0
-
- idx = len(classification_lst) - 1
- while idx:
- if classification_lst[idx] == classification:
- break
- idx -= 1
-
- if failure:
- classification = "failure"
- elif classification == "outlier":
- classification = "normal"
-
- trend = round(float(median_lst[-1]) / 1000000, 2) \
- if not isnan(median_lst[-1]) else ''
- sample = round(float(sample_lst[idx]) / 1000000, 2) \
- if not isnan(sample_lst[idx]) else ''
- rel_change = rel_change_lst[idx] \
- if rel_change_lst[idx] is not None else ''
- tbl_lst.append([name,
- trend,
- classification,
- '-' if classification == "normal" else sample,
- '-' if classification == "normal" else rel_change,
- nr_outliers])
-
- # Sort the table according to the classification
+ # Short-term relative change [%] of the last trend median against
+ # median_t_14 (presumably the trend median 14 samples back -- TODO
+ # confirm against the caller that computes median_t_14).
+ rel_change_last = round(
+ ((last_median_t - median_t_14) / median_t_14) * 100, 2)
+
+ # Long-term relative change [%] against the maximum observed median.
+ # Guard against NaN inputs and division by zero; nan propagates to
+ # the '-' placeholder in the table row below.
+ if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
+ rel_change_long = nan
+ else:
+ rel_change_long = round(
+ ((last_median_t - max_median) / max_median) * 100, 2)
+
+ logging.info("rel_change_last : {}".format(rel_change_last))
+ logging.info("rel_change_long : {}".format(rel_change_long))
+
+ # One summary row per test: name, last trend median scaled by 1e6
+ # (likely Mpps -- TODO confirm units), the two relative changes
+ # ('-' when not computable), and the number of regressions,
+ # progressions and outliers classified within the evaluation
+ # window starting at win_first_idx.
+ tbl_lst.append(
+ [name,
+ '-' if isnan(last_median_t) else
+ round(last_median_t / 1000000, 2),
+ '-' if isnan(rel_change_last) else rel_change_last,
+ '-' if isnan(rel_change_long) else rel_change_long,
+ classification_lst[win_first_idx:].count("regression"),
+ classification_lst[win_first_idx:].count("progression"),
+ classification_lst[win_first_idx:].count("outlier")])
+
+ # Sort rows alphabetically by test name (column 0), replacing the
+ # previous classification-based ordering.
+ tbl_lst.sort(key=lambda rel: rel[0])
+