X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_cpta.py;h=0bef38d82d1f0fc42ba1916348416e7f2be8b36c;hp=4b10440257bd338d030d2cfc3bd68c1d4a877fed;hb=575b935029aa496629f138d0e5f756921b64d1e6;hpb=1da19da813655f643bc3c6e4d03bed987f076f07

diff --git a/resources/tools/presentation/generator_cpta.py b/resources/tools/presentation/generator_cpta.py
index 4b10440257..0bef38d82d 100644
--- a/resources/tools/presentation/generator_cpta.py
+++ b/resources/tools/presentation/generator_cpta.py
@@ -13,7 +13,6 @@
 """Generation of Continuous Performance Trending and Analysis.
 """
-
 import re
 import logging
 import csv
@@ -21,6 +20,7 @@ import csv
 from collections import OrderedDict
 from datetime import datetime
 from copy import deepcopy
+from os import listdir
 
 import prettytable
 import plotly.offline as ploff
@@ -180,25 +180,32 @@ def _generate_trending_traces(in_data, job_name, build_info,
     :rtype: tuple(traces, result)
     """
 
-    if incl_tests not in (u"mrr", u"ndr", u"pdr"):
+    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
         return list(), None
 
     data_x = list(in_data.keys())
     data_y_pps = list()
     data_y_mpps = list()
    data_y_stdev = list()
-    for item in in_data.values():
-        data_y_pps.append(float(item[u"receive-rate"]))
-        data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
-        data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
-
+    if incl_tests == u"pdr-lat":
+        for item in in_data.values():
+            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
+            data_y_stdev.append(float(u"nan"))
+            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
+        multi = 1.0
+    else:
+        for item in in_data.values():
+            data_y_pps.append(float(item[u"receive-rate"]))
+            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
+            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
+        multi = 1e6
     hover_text = list()
     xaxis = list()
     for index, key in enumerate(data_x):
         str_key = str(key)
         date = build_info[job_name][str_key][0]
         hover_str = (u"date: {date}<br>"
-                     u"{property} [Mpps]: {value:.3f}<br>"
+                     u"{property} [Mpps]: <val><br>"
                      u"<stdev>"
                      u"{sut}-ref: {build}<br>"
                      u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                      u"testbed: {testbed}")
@@ -209,10 +216,26 @@ def _generate_trending_traces(in_data, job_name, build_info,
             )
         else:
             hover_str = hover_str.replace(u"<stdev>", u"")
+        if incl_tests == u"pdr-lat":
+            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
+        else:
+            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
         if u"-cps" in name:
-            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]")
-        if u"dpdk" in job_name:
-            hover_text.append(hover_str.format(
+            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
+                replace(u"throughput", u"connection rate")
+        if u"vpp" in job_name:
+            hover_str = hover_str.format(
+                date=date,
+                property=u"average" if incl_tests == u"mrr" else u"throughput",
+                value=data_y_mpps[index],
+                sut=u"vpp",
+                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
+                test=incl_tests,
+                period=u"daily" if incl_tests == u"mrr" else u"weekly",
+                build_nr=str_key,
+                testbed=build_info[job_name][str_key][2])
+        elif u"dpdk" in job_name:
+            hover_str = hover_str.format(
                 date=date,
                 property=u"average" if incl_tests == u"mrr" else u"throughput",
                 value=data_y_mpps[index],
@@ -221,22 +244,23 @@ def _generate_trending_traces(in_data, job_name, build_info,
                 test=incl_tests,
                 period=u"weekly",
                 build_nr=str_key,
-                testbed=build_info[job_name][str_key][2]))
-        elif u"vpp" in job_name:
+                testbed=build_info[job_name][str_key][2])
+        elif u"trex" in job_name:
             hover_str = hover_str.format(
                 date=date,
                 property=u"average" if incl_tests == u"mrr" else u"throughput",
                 value=data_y_mpps[index],
-                sut=u"vpp",
-                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
+                sut=u"trex",
+                build=u"",
                 test=incl_tests,
                 period=u"daily" if incl_tests == u"mrr" else u"weekly",
                 build_nr=str_key,
                 testbed=build_info[job_name][str_key][2])
-        if u"-cps" in name:
-            hover_str = hover_str.replace(u"throughput", u"connection rate")
-        hover_text.append(hover_str)
-
+        if incl_tests == u"pdr-lat":
+            hover_str = hover_str.replace(
+                u"throughput [Mpps]", u"latency [s]"
+            )
+        hover_text.append(hover_str)
         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                               int(date[9:11]), int(date[12:])))
@@ -249,9 +273,9 @@ def _generate_trending_traces(in_data, job_name, build_info,
             classify_anomalies(data_pd)
     except ValueError as err:
         logging.info(f"{err} Skipping")
-        return
-    avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
-    stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
+        return list(), None
+    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
+    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]
 
     anomalies = OrderedDict()
     anomalies_colors = list()
@@ -264,7 +288,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
     if anomaly_classification:
         for index, (key, value) in enumerate(data_pd.items()):
             if anomaly_classification[index] in (u"regression", u"progression"):
-                anomalies[key] = value / 1e6
+                anomalies[key] = value / multi
                 anomalies_colors.append(
                     anomaly_color[anomaly_classification[index]])
                 anomalies_avgs.append(avgs_mpps[index])
" - f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}" - ) + if incl_tests == u"pdr-lat": + trend_hover_str = ( + f"trend [s]: {avgs_mpps[idx]:.1e}
" + ) + else: + trend_hover_str = ( + f"trend [Mpps]: {avgs_mpps[idx]:.3f}
" + f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}" + ) trend_hover_text.append(trend_hover_str) trace_trend = plgo.Scatter( @@ -317,6 +346,26 @@ def _generate_trending_traces(in_data, job_name, build_info, ) traces.append(trace_trend) + if incl_tests == u"pdr-lat": + colorscale = [ + [0.00, u"green"], + [0.33, u"green"], + [0.33, u"white"], + [0.66, u"white"], + [0.66, u"red"], + [1.00, u"red"] + ] + ticktext = [u"Progression", u"Normal", u"Regression"] + else: + colorscale = [ + [0.00, u"red"], + [0.33, u"red"], + [0.33, u"white"], + [0.66, u"white"], + [0.66, u"green"], + [1.00, u"green"] + ] + ticktext = [u"Regression", u"Normal", u"Progression"] trace_anomalies = plgo.Scatter( x=list(anomalies.keys()), y=anomalies_avgs, @@ -329,14 +378,7 @@ def _generate_trending_traces(in_data, job_name, build_info, u"size": 15, u"symbol": u"circle-open", u"color": anomalies_colors, - u"colorscale": [ - [0.00, u"red"], - [0.33, u"red"], - [0.33, u"white"], - [0.66, u"white"], - [0.66, u"green"], - [1.00, u"green"] - ], + u"colorscale": colorscale, u"showscale": True, u"line": { u"width": 2 @@ -351,7 +393,7 @@ def _generate_trending_traces(in_data, job_name, build_info, }, u"tickmode": u"array", u"tickvals": [0.167, 0.500, 0.833], - u"ticktext": [u"Regression", u"Normal", u"Progression"], + u"ticktext": ticktext, u"ticks": u"", u"ticklen": 0, u"tickangle": -90, @@ -398,7 +440,7 @@ def _generate_all_charts(spec, input_data): data = input_data.filter_tests_by_name( graph, - params=[u"type", u"result", u"throughput", u"tags"], + params=[u"type", u"result", u"throughput", u"latency", u"tags"], continue_on_error=True ) @@ -411,6 +453,8 @@ def _generate_all_charts(spec, input_data): for ttype in graph.get(u"test-type", (u"mrr", )): for core in graph.get(u"core", tuple()): csv_tbl = list() + csv_tbl_lat_1 = list() + csv_tbl_lat_2 = list() res = dict() chart_data = dict() chart_tags = dict() @@ -426,6 +470,8 @@ def _generate_all_charts(spec, input_data): if chart_data.get(test_id, None) is None: chart_data[test_id] = OrderedDict() try: + lat_1 = u"" + lat_2 = u"" if ttype == u"mrr": rate = test[u"result"][u"receive-rate"] stdev = \ @@ -438,12 +484,23 @@ def _generate_all_charts(spec, input_data): rate = \ test["throughput"][u"PDR"][u"LOWER"] stdev = float(u"nan") + lat_1 = test[u"latency"][u"PDR50"]\ + [u"direction1"][u"avg"] + lat_2 = test[u"latency"][u"PDR50"]\ + [u"direction2"][u"avg"] else: continue chart_data[test_id][int(index)] = { u"receive-rate": rate, u"receive-stdev": stdev } + if ttype == u"pdr": + chart_data[test_id][int(index)].update( + { + u"lat_1": lat_1, + u"lat_2": lat_2 + } + ) chart_tags[test_id] = \ test.get(u"tags", None) except (KeyError, TypeError): @@ -452,17 +509,36 @@ def _generate_all_charts(spec, input_data): # Add items to the csv table: for tst_name, tst_data in chart_data.items(): tst_lst = list() + tst_lst_lat_1 = list() + tst_lst_lat_2 = list() for bld in builds_dict[job_name]: itm = tst_data.get(int(bld), dict()) # CSIT-1180: Itm will be list, compute stats. 
@@ -452,17 +509,36 @@ def _generate_all_charts(spec, input_data):
                 # Add items to the csv table:
                 for tst_name, tst_data in chart_data.items():
                     tst_lst = list()
+                    tst_lst_lat_1 = list()
+                    tst_lst_lat_2 = list()
                     for bld in builds_dict[job_name]:
                         itm = tst_data.get(int(bld), dict())
                         # CSIT-1180: Itm will be list, compute stats.
                         try:
                             tst_lst.append(str(itm.get(u"receive-rate", u"")))
+                            if ttype == u"pdr":
+                                tst_lst_lat_1.append(
+                                    str(itm.get(u"lat_1", u""))
+                                )
+                                tst_lst_lat_2.append(
+                                    str(itm.get(u"lat_2", u""))
+                                )
                         except AttributeError:
                             tst_lst.append(u"")
+                            if ttype == u"pdr":
+                                tst_lst_lat_1.append(u"")
+                                tst_lst_lat_2.append(u"")
                     csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
+                    csv_tbl_lat_1.append(
+                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
+                    )
+                    csv_tbl_lat_2.append(
+                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
+                    )
 
                 # Generate traces:
                 traces = list()
+                traces_lat = list()
                 index = 0
                 groups = graph.get(u"groups", None)
                 visibility = list()
@@ -517,6 +593,18 @@ def _generate_all_charts(spec, input_data):
                                 color=COLORS[index],
                                 incl_tests=ttype
                             )
+                            if ttype == u"pdr":
+                                trace_lat, _ = _generate_trending_traces(
+                                    test_data,
+                                    job_name=job_name,
+                                    build_info=build_info,
+                                    name=u'-'.join(
+                                        tst_name.split(u'.')[-1].split(
+                                            u'-')[2:-1]),
+                                    color=COLORS[index],
+                                    incl_tests=u"pdr-lat"
+                                )
+                                traces_lat.extend(trace_lat)
                         except IndexError:
                             logging.error(
                                 f"Out of colors: index: "
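The `pdr-lat` call above derives its trace name with the same slicing expression as the throughput call: take the last dot-separated component of the test id, then drop the leading testbed/NIC tokens and the trailing test-type token. A worked example with a hypothetical test id:

    tst_name = u"tests.vpp.perf.ip4.2n1l-10ge2p1x710-ethip4-ip4base-ndrpdr"
    name = u'-'.join(tst_name.split(u'.')[-1].split(u'-')[2:-1])
    print(name)  # ethip4-ip4base
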
@@ -594,10 +682,39 @@ def _generate_all_charts(spec, input_data):
                     except plerr.PlotlyEmptyDataError:
                         logging.warning(u"No data for the plot. Skipped.")
 
+                if traces_lat:
+                    try:
+                        layout = deepcopy(graph[u"layout"])
+                        layout[u"yaxis"][u"title"] = u"Latency [s]"
+                        layout[u"yaxis"][u"tickformat"] = u".3s"
+                    except KeyError as err:
+                        logging.error(u"Finished with error: No layout defined")
+                        logging.error(repr(err))
+                        return dict()
+                    name_file = (
+                        f"{spec.cpta[u'output-file']}/"
+                        f"{graph[u'output-file-name']}-lat.html"
+                    )
+                    name_file = name_file.format(core=core, test_type=ttype)
+
+                    logging.info(f"    Writing the file {name_file}")
+                    plpl = plgo.Figure(data=traces_lat, layout=layout)
+                    try:
+                        ploff.plot(
+                            plpl,
+                            show_link=False,
+                            auto_open=False,
+                            filename=name_file
+                        )
+                    except plerr.PlotlyEmptyDataError:
+                        logging.warning(u"No data for the plot. Skipped.")
+
                 return_lst.append(
                     {
                         u"job_name": job_name,
                         u"csv_table": csv_tbl,
+                        u"csv_lat_1": csv_tbl_lat_1,
+                        u"csv_lat_2": csv_tbl_lat_2,
                         u"results": res
                     }
                 )
@@ -634,17 +751,34 @@ def _generate_all_charts(spec, input_data):
 
     # Create the table header:
     csv_tables = dict()
+    csv_tables_l1 = dict()
+    csv_tables_l2 = dict()
     for job_name in builds_dict:
         if csv_tables.get(job_name, None) is None:
             csv_tables[job_name] = list()
+        if csv_tables_l1.get(job_name, None) is None:
+            csv_tables_l1[job_name] = list()
+        if csv_tables_l2.get(job_name, None) is None:
+            csv_tables_l2[job_name] = list()
         header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
         build_dates = [x[0] for x in build_info[job_name].values()]
         header = f"Build Date:,{u','.join(build_dates)}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
         versions = [x[1] for x in build_info[job_name].values()]
         header = f"Version:,{u','.join(versions)}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
+        testbed = [x[2] for x in build_info[job_name].values()]
+        header = f"Test bed:,{u','.join(testbed)}\n"
+        csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
 
     for chart in spec.cpta[u"plots"]:
         results = _generate_chart(chart)
@@ -653,6 +787,8 @@ def _generate_all_charts(spec, input_data):
 
         for result in results:
             csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
+            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
+            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])
 
             if anomaly_classifications.get(result[u"job_name"], None) is None:
                 anomaly_classifications[result[u"job_name"]] = dict()
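The regression/progression writers in the final hunk enrich each failing test name with fields parsed from the matching performance-trending-dashboard `.txt` table. The split indices assume a prettytable-style row whose columns 2, 3 and 4 hold the last trend, the number of runs and the long-term change; a hypothetical row (the column layout is assumed, not taken from a real dashboard file):

    line = u"| x710-ethip4-ip4base | 12.34 | 5 | -2.5 |"
    line = line.replace(u" ", u"")
    trend, number, ltc = line.split(u"|")[2:5]
    print(trend, number, ltc)  # 12.34 5 -2.5
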
@@ -691,24 +827,71 @@ def _generate_all_charts(spec, input_data):
         with open(f"{file_name}.txt", u"wt") as txt_file:
             txt_file.write(str(txt_table))
 
+    for job_name, csv_table in csv_tables_l1.items():
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
+            file_handler.writelines(csv_table)
+    for job_name, csv_table in csv_tables_l2.items():
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
+            file_handler.writelines(csv_table)
+
     # Evaluate result:
     if anomaly_classifications:
+        legend_str = (f"Legend:\n[ Last trend in Mpps/Mcps | number of runs for"
+                      f" last trend | ")
         result = u"PASS"
         for job_name, job_data in anomaly_classifications.items():
+            data = list()
+            tb = u"-".join(job_name.split(u"-")[-2:])
+            for file in listdir(f"{spec.cpta[u'output-file']}"):
+                if tb in file and u"performance-trending-dashboard" in \
+                        file and u"txt" in file:
+                    file_to_read = f"{spec.cpta[u'output-file']}/{file}"
+                    with open(file_to_read, u"rt") as dash_file:
+                        data = data + dash_file.readlines()
             file_name = \
                 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
             with open(file_name, u'w') as txt_file:
                 for test_name, classification in job_data.items():
                     if classification == u"regression":
-                        txt_file.write(test_name + u'\n')
+                        tst = test_name.split(u" ")[1].split(u".")[1:]
+                        nic = tst[0].split(u"-")[0]
+                        tst_name = f"{nic}-{tst[1]}"
+
+                        for line in data:
+                            if tst_name in line:
+                                line = line.replace(u" ", u"")
+                                trend = line.split(u"|")[2]
+                                number = line.split(u"|")[3]
+                                ltc = line.split(u"|")[4]
+                                txt_file.write(f"{tst_name} [ {trend}M | "
+                                               f"#{number} | {ltc}% ]\n")
                     if classification in (u"regression", u"outlier"):
                         result = u"FAIL"
+                txt_file.write(f"{legend_str}regression in percentage ]")
             file_name = \
                 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
             with open(file_name, u'w') as txt_file:
                 for test_name, classification in job_data.items():
                     if classification == u"progression":
-                        txt_file.write(test_name + u'\n')
+                        tst = test_name.split(u" ")[1].split(u".")[1:]
+                        nic = tst[0].split(u"-")[0]
+                        tst_name = f"{nic}-{tst[1]}"
+
+                        for line in data:
+                            if tst_name in line:
+                                line = line.replace(u" ", u"")
+                                trend = line.split(u"|")[2]
+                                number = line.split(u"|")[3]
+                                ltc = line.split(u"|")[4]
+                                txt_file.write(f"{tst_name} [ {trend}M | "
+                                               f"#{number} | {ltc}% ]\n")
+                txt_file.write(f"{legend_str}progression in percentage ]")
     else:
         result = u"FAIL"
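The latency CSVs written above share the layout of the throughput table: four header rows (build number, build date, version, test bed) followed by one row per test, with one value per build. A minimal consumer sketch (the file name is hypothetical, built from the `{job_name}-lat-P50-50-d1` pattern in the patch):

    import csv

    with open(u"csit-vpp-perf-ndrpdr-weekly-master-lat-P50-50-d1.csv",
              u"rt") as f_in:
        rows = list(csv.reader(f_in))
    headers, tests = rows[:4], rows[4:]  # 4 metadata rows, then test rows
    for row in tests:
        test_name, lat_values = row[0], row[1:]  # one value per build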