Add trend in Mpps, number of runs for the trend, and trend change in % to
the dashboard; remove the Short-Term Change column.
Signed-off-by: Viliam Luc <vluc@cisco.com>
Change-Id: Ib02d2a2224fc52b79832560241b0530aa2eaaf77
"""Generation of Continuous Performance Trending and Analysis.
"""
"""Generation of Continuous Performance Trending and Analysis.
"""
import re
import logging
import csv
import re
import logging
import csv
from collections import OrderedDict
from datetime import datetime
from copy import deepcopy
from collections import OrderedDict
from datetime import datetime
from copy import deepcopy
import prettytable
import plotly.offline as ploff
import prettytable
import plotly.offline as ploff
# Evaluate result:
if anomaly_classifications:
# Evaluate result:
if anomaly_classifications:
+ legend_str = (f"Legend:\n[ Last trend in Mpps/Mcps | number of runs for"
+ f" last trend | ")
result = u"PASS"
for job_name, job_data in anomaly_classifications.items():
result = u"PASS"
for job_name, job_data in anomaly_classifications.items():
+ data = []
+ tb = u"-".join(job_name.split(u"-")[-2:])
+ for file in listdir(f"{spec.cpta[u'output-file']}"):
+ if tb in file and u"performance-trending-dashboard" in \
+ file and u"txt" in file:
+ file_to_read = f"{spec.cpta[u'output-file']}/{file}"
+ with open(f"{file_to_read}", u"rt") as input:
+ data = data + input.readlines()
file_name = \
f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
with open(file_name, u'w') as txt_file:
for test_name, classification in job_data.items():
if classification == u"regression":
file_name = \
f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
with open(file_name, u'w') as txt_file:
for test_name, classification in job_data.items():
if classification == u"regression":
- txt_file.write(test_name + u'\n')
+ tst = test_name.split(" ")[1].split(".")[1:]
+ nic = tst[0].split("-")[0]
+ tst_name = f"{nic}-{tst[1]}"
+
+ for line in data:
+ if tst_name in line:
+ line = line.replace(" ", "")
+ trend = line.split("|")[2]
+ number = line.split("|")[3]
+ ltc = line.split("|")[4]
+ txt_file.write(f"{tst_name} [ {trend}M | "
+ f"#{number} | {ltc}% ]\n")
+
if classification in (u"regression", u"outlier"):
result = u"FAIL"
if classification in (u"regression", u"outlier"):
result = u"FAIL"
+
+ txt_file.write(f"{legend_str}regression in percentage ]")
+
file_name = \
f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
with open(file_name, u'w') as txt_file:
for test_name, classification in job_data.items():
if classification == u"progression":
file_name = \
f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
with open(file_name, u'w') as txt_file:
for test_name, classification in job_data.items():
if classification == u"progression":
- txt_file.write(test_name + u'\n')
+ tst = test_name.split(" ")[1].split(".")[1:]
+ nic = tst[0].split("-")[0]
+ tst_name = f"{nic}-{tst[1]}"
+
+ for line in data:
+ if tst_name in line:
+ line = line.replace(" ", "")
+ trend = line.split("|")[2]
+ number = line.split("|")[3]
+ ltc = line.split("|")[4]
+ txt_file.write(f"{tst_name} [ {trend}M | "
+ f"#{number} | {ltc}% ]\n")
+
+ txt_file.write(f"{legend_str}progression in percentage ]")
header = [
u"Test Case",
u"Trend [Mpps]",
header = [
u"Test Case",
u"Trend [Mpps]",
- u"Short-Term Change [%]",
- u"Long-Term Change [%]",
+ u"Number of runs [#]",
+ u"Trend Change [%]",
u"Regressions [#]",
u"Progressions [#]"
]
u"Regressions [#]",
u"Progressions [#]"
]
last_avg = avgs[-1]
avg_week_ago = avgs[max(-win_size, -len(avgs))]
last_avg = avgs[-1]
avg_week_ago = avgs[max(-win_size, -len(avgs))]
+ nr_of_last_avgs = 0;
+ for x in reversed(avgs):
+ if x == last_avg:
+ nr_of_last_avgs += 1
+ else:
+ break
+
if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
rel_change_last = nan
else:
if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
rel_change_last = nan
else:
tbl_lst.append(
[tbl_dict[tst_name][u"name"],
round(last_avg / 1e6, 2),
tbl_lst.append(
[tbl_dict[tst_name][u"name"],
round(last_avg / 1e6, 2),
rel_change_long,
classification_lst[-win_size+1:].count(u"regression"),
classification_lst[-win_size+1:].count(u"progression")])
tbl_lst.sort(key=lambda rel: rel[0])
rel_change_long,
classification_lst[-win_size+1:].count(u"regression"),
classification_lst[-win_size+1:].count(u"progression")])
tbl_lst.sort(key=lambda rel: rel[0])
- tbl_lst.sort(key=lambda rel: rel[3])
tbl_lst.sort(key=lambda rel: rel[2])
tbl_lst.sort(key=lambda rel: rel[2])
-
- tbl_sorted = list()
- for nrr in range(table[u"window"], -1, -1):
- tbl_reg = [item for item in tbl_lst if item[4] == nrr]
- for nrp in range(table[u"window"], -1, -1):
- tbl_out = [item for item in tbl_reg if item[5] == nrp]
- tbl_sorted.extend(tbl_out)
+ tbl_lst.sort(key=lambda rel: rel[3])
+ tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
+ tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
logging.info(f" Writing file: {file_name}")
with open(file_name, u"wt") as file_handler:
file_handler.write(header_str)
file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
logging.info(f" Writing file: {file_name}")
with open(file_name, u"wt") as file_handler:
file_handler.write(header_str)
- for test in tbl_sorted:
file_handler.write(u",".join([str(item) for item in test]) + u'\n')
logging.info(f" Writing file: {table[u'output-file']}.txt")
file_handler.write(u",".join([str(item) for item in test]) + u'\n')
logging.info(f" Writing file: {table[u'output-file']}.txt")