-VPP Performance Trending
-========================
-
-This auto-generated document contains VPP performance trending graphs and data.
-It is generated using CSIT continuous trending test and analysis jobs and is
-updated daily. More detail is available on
-`CSIT Performance Trending and Analysis <https://wiki.fd.io/view/CSIT/PerformanceTrendingAnalysis>`_
-wiki page.
+VPP MRR Performance Dashboard
+=============================
+
+Description
+-----------
+
+Dashboard tables list a summary of per test-case VPP MRR performance trend
+values and detected anomalies (Maximum Receive Rate - received packet rate
+under line rate load). Data comes from trending MRR jobs executed every 12 hrs
+(2:00, 14:00 UTC). Trend and anomaly calculations are done over a rolling
+window of <N> samples, currently with N=14, covering the last 7 days. Separate
+tables are generated for tested VPP worker-thread-core combinations (1t1c,
+2t2c, 4t4c).
+
+Legend to the table:
+
+ - "Test case": name of CSIT test case, naming convention here
+ `CSIT/csit-test-naming <https://wiki.fd.io/view/CSIT/csit-test-naming>`_
+ - "Thput trend [Mpps]": last value of trend over rolling window.
+ - "Anomaly value [Mpps]": in precedence - i) highest outlier if 3
+ consecutive outliers, ii) highest regression if regressions detected,
+ iii) highest progression if progressions detected, iv) nil if normal i.e.
+ within trend.
+ - "Anomaly vs. Trend [%]": anomaly value vs. trend value.
+ - "Classification": outlier, regression, progression, normal - observed
+ over a rolling window.
+ - "# Outliers": number of outliers detected.
+
+Tables are listed in sections 1.x, followed by daily trending graphs in
+sections 2.x. The daily trending data used to generate the graphs is listed
+in sections 3.x.
+
+VPP worker on 1t1c
+------------------
+
+.. include:: ../../../_build/_static/vpp/performance-trending-dashboard-1t1c.rst
+
+VPP worker on 2t2c
+------------------
+
+.. include:: ../../../_build/_static/vpp/performance-trending-dashboard-2t2c.rst
+
+VPP worker on 4t4c
+------------------
+
+.. include:: ../../../_build/_static/vpp/performance-trending-dashboard-4t4c.rst
+
return results
-def _generate_trending_traces(in_data, period, moving_win_size=10,
+def _generate_trending_traces(in_data, build_info, period, moving_win_size=10,
fill_missing=True, use_first=False,
show_moving_median=True, name="", color=""):
"""Generate the trending traces:
- outliers, regress, progress
:param in_data: Full data set.
+ :param build_info: Information about the builds.
:param period: Sampling period.
:param moving_win_size: Window size.
:param fill_missing: If the chosen sample is missing in the full set, its
:param name: Name of the plot
:param color: Name of the color for the plot.
:type in_data: OrderedDict
+ :type build_info: dict
:type period: int
:type moving_win_size: int
:type fill_missing: bool
in_data = _select_data(in_data, period,
fill_missing=fill_missing,
use_first=use_first)
-
+ # try:
+ # data_x = ["{0}/{1}".format(key, build_info[str(key)][1].split("~")[-1])
+ # for key in in_data.keys()]
+ # except KeyError:
+ # data_x = [key for key in in_data.keys()]
+ hover_text = ["vpp-build: {0}".format(x[1].split("~")[-1])
+ for x in build_info.values()]
data_x = [key for key in in_data.keys()]
+
data_y = [val for val in in_data.values()]
data_pd = pd.Series(data_y, index=data_x)
anomalies = pd.Series()
anomalies_res = list()
for idx, item in enumerate(in_data.items()):
+ # item_pd = pd.Series([item[1], ],
+ # index=["{0}/{1}".
+ # format(item[0],
+ # build_info[str(item[0])][1].split("~")[-1]),
+ # ])
item_pd = pd.Series([item[1], ], index=[item[0], ])
if item[0] in outliers.keys():
anomalies = anomalies.append(item_pd)
"color": color,
"symbol": "circle",
},
+ text=hover_text,
+ hoverinfo="x+y+text+name"
)
traces = [trace_samples, ]
builds_lst.append(str(build["build"]))
# Get "build ID": "date" dict:
- build_dates = dict()
+ build_info = OrderedDict()
for build in builds_lst:
try:
- build_dates[build] = \
- input_data.metadata(job_name, build)["generated"][:14]
+ build_info[build] = (
+ input_data.metadata(job_name, build)["generated"][:14],
+ input_data.metadata(job_name, build)["version"]
+ )
except KeyError:
- pass
+ build_info[build] = ("", "")
+ logging.info("{}: {}, {}".format(build,
+ build_info[build][0],
+ build_info[build][1]))
# Create the header:
csv_table = list()
header = "Build Number:," + ",".join(builds_lst) + '\n'
csv_table.append(header)
- header = "Build Date:," + ",".join(build_dates.values()) + '\n'
+ build_dates = [x[0] for x in build_info.values()]
+ header = "Build Date:," + ",".join(build_dates) + '\n'
+ csv_table.append(header)
+ vpp_versions = [x[1] for x in build_info.values()]
+ header = "VPP Version:," + ",".join(vpp_versions) + '\n'
csv_table.append(header)
results = list()
for period in chart["periods"]:
# Generate traces:
traces = list()
- win_size = 10 if period == 1 else 5 if period < 20 else 3
+ win_size = 14 if period == 1 else 5 if period < 20 else 3
idx = 0
for test_name, test_data in chart_data.items():
if not test_data:
test_name = test_name.split('.')[-1]
trace, result = _generate_trending_traces(
test_data,
+ build_info=build_info,
period=period,
moving_win_size=win_size,
fill_missing=True,
row[idx] = str(round(float(item) / 1000000, 2))
except ValueError:
pass
- txt_table.add_row(row)
+ try:
+ txt_table.add_row(row)
+ except Exception as err:
+ logging.warning("Error occurred while generating TXT table:"
+ "\n{0}".format(err))
line_nr += 1
txt_table.align["Build Number:"] = "l"
with open("{0}.txt".format(file_name), "w") as txt_file:
item.append(round(stdev(data_t) / 1000000, 2))
else:
item.extend([None, None])
- if item[1] is not None and item[3] is not None:
+ if item[1] is not None and item[3] is not None and item[1] != 0:
item.append(int(relative_change(float(item[1]), float(item[3]))))
if len(item) == 6:
tbl_lst.append(item)
classification = "outlier"
elif "progression" in classification_lst[first_idx:]:
classification = "progression"
- else:
+ elif "normal" in classification_lst[first_idx:]:
classification = "normal"
+ else:
+ classification = None
idx = len(classification_lst) - 1
while idx:
# Sort the table according to the classification
tbl_sorted = list()
- for classification in ("regression", "outlier", "progression", "normal"):
+ for classification in ("regression", "progression", "outlier", "normal"):
tbl_tmp = [item for item in tbl_lst if item[4] == classification]
tbl_tmp.sort(key=lambda rel: rel[0])
tbl_sorted.extend(tbl_tmp)