X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_cpta.py;h=a308f64e2e7406b7486104e07db7da3616f39446;hb=48cd54ff00049d58494834d25d3f0ac846ce4017;hp=c9bc44c460202f8c85c8a7d89dd34abda1af616a;hpb=190462e1f242b59d927eff3e63826fe6343eadbc;p=csit.git

diff --git a/resources/tools/presentation/generator_cpta.py b/resources/tools/presentation/generator_cpta.py
index c9bc44c460..a308f64e2e 100644
--- a/resources/tools/presentation/generator_cpta.py
+++ b/resources/tools/presentation/generator_cpta.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -146,7 +146,7 @@ def generate_cpta(spec, data):
 
 
 def _generate_trending_traces(in_data, job_name, build_info,
-                              show_trend_line=True, name=u"", color=u""):
+                              name=u"", color=u""):
     """Generate the trending traces:
      - samples,
      - outliers, regress, progress
@@ -155,13 +155,11 @@ def _generate_trending_traces(in_data, job_name, build_info,
     :param in_data: Full data set.
     :param job_name: The name of job which generated the data.
     :param build_info: Information about the builds.
-    :param show_trend_line: Show moving median (trending plot).
     :param name: Name of the plot
     :param color: Name of the color for the plot.
     :type in_data: OrderedDict
     :type job_name: str
     :type build_info: dict
-    :type show_trend_line: bool
     :type name: str
     :type color: str
     :returns: Generated traces (list) and the evaluated result.
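Note: with this patch, _generate_trending_traces() no longer receives a bare
receive-rate per build; each value of in_data is expected to be a mapping that
carries both the rate and its standard deviation. A minimal sketch of the
expected shape (build numbers and values below are made up for illustration):

    from collections import OrderedDict

    # Hypothetical MRR samples keyed by build number; rates are in pps.
    in_data = OrderedDict()
    in_data[1440] = {u"receive-rate": 9552811.0, u"receive-stdev": 12328.0}
    in_data[1441] = {u"receive-rate": 9548312.0, u"receive-stdev": 11107.0}

    # The patched loop below derives three series from it:
    data_y_pps = [float(item[u"receive-rate"]) for item in in_data.values()]
    data_y_mpps = [rate / 1e6 for rate in data_y_pps]
    data_y_stdev = [
        float(item[u"receive-stdev"]) / 1e6 for item in in_data.values()
    ]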
" - u"value: {value:,}
" + u"average [Mpps]: {value:.3f}
" + u"stdev [Mpps]: {stdev:.3f}
" u"{sut}-ref: {build}
" u"csit-ref: mrr-{period}-build-{build_nr}
" u"testbed: {testbed}") if u"dpdk" in job_name: hover_text.append(hover_str.format( date=date, - value=int(in_data[idx]), + value=data_y_mpps[index], + stdev=data_y_stdev[index], sut=u"dpdk", - build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0], + build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0], period=u"weekly", - build_nr=idx, - testbed=build_info[job_name][str(idx)][2])) + build_nr=str_key, + testbed=build_info[job_name][str_key][2])) elif u"vpp" in job_name: hover_text.append(hover_str.format( date=date, - value=int(in_data[idx]), + value=data_y_mpps[index], + stdev=data_y_stdev[index], sut=u"vpp", - build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0], + build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0], period=u"daily", - build_nr=idx, - testbed=build_info[job_name][str(idx)][2])) + build_nr=str_key, + testbed=build_info[job_name][str_key][2])) xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), int(date[9:11]), int(date[12:]))) data_pd = OrderedDict() - for key, value in zip(xaxis, data_y): + for key, value in zip(xaxis, data_y_pps): data_pd[key] = value - anomaly_classification, avgs = classify_anomalies(data_pd) + anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd) + avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps] + stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps] anomalies = OrderedDict() anomalies_colors = list() @@ -217,20 +227,19 @@ def _generate_trending_traces(in_data, job_name, build_info, u"progression": 1.0 } if anomaly_classification: - for idx, (key, value) in enumerate(data_pd.items()): - if anomaly_classification[idx] in \ - (u"outlier", u"regression", u"progression"): - anomalies[key] = value + for index, (key, value) in enumerate(data_pd.items()): + if anomaly_classification[index] in (u"regression", u"progression"): + anomalies[key] = value / 1e6 anomalies_colors.append( - anomaly_color[anomaly_classification[idx]]) - anomalies_avgs.append(avgs[idx]) + anomaly_color[anomaly_classification[index]]) + anomalies_avgs.append(avgs_mpps[index]) anomalies_colors.extend([0.0, 0.5, 1.0]) # Create traces trace_samples = plgo.Scatter( x=xaxis, - y=data_y, + y=data_y_mpps, mode=u"markers", line={ u"width": 1 @@ -244,27 +253,34 @@ def _generate_trending_traces(in_data, job_name, build_info, u"symbol": u"circle", }, text=hover_text, - hoverinfo=u"text" + hoverinfo=u"text+name" ) traces = [trace_samples, ] - if show_trend_line: - trace_trend = plgo.Scatter( - x=xaxis, - y=avgs, - mode=u"lines", - line={ - u"shape": u"linear", - u"width": 1, - u"color": color, - }, - showlegend=False, - legendgroup=name, - name=f"{name}", - text=[f"trend: {int(avg):,}" for avg in avgs], - hoverinfo=u"text+name" + trend_hover_text = list() + for idx in range(len(data_x)): + trend_hover_str = ( + f"trend [Mpps]: {avgs_mpps[idx]:.3f}
" + f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}" ) - traces.append(trace_trend) + trend_hover_text.append(trend_hover_str) + + trace_trend = plgo.Scatter( + x=xaxis, + y=avgs_mpps, + mode=u"lines", + line={ + u"shape": u"linear", + u"width": 1, + u"color": color, + }, + showlegend=False, + legendgroup=name, + name=f"{name}", + text=trend_hover_text, + hoverinfo=u"text+name" + ) + traces.append(trace_trend) trace_anomalies = plgo.Scatter( x=list(anomalies.keys()), @@ -335,11 +351,7 @@ def _generate_all_charts(spec, input_data): :rtype: dict """ - logs = list() - - logs.append( - (u"INFO", f" Generating the chart {graph.get(u'title', u'')} ...") - ) + logging.info(f" Generating the chart {graph.get(u'title', u'')} ...") job_name = list(graph[u"data"].keys())[0] @@ -347,19 +359,22 @@ def _generate_all_charts(spec, input_data): res = dict() # Transform the data - logs.append( - (u"INFO", - f" Creating the data set for the {graph.get(u'type', u'')} " - f"{graph.get(u'title', u'')}." - ) + logging.info( + f" Creating the data set for the {graph.get(u'type', u'')} " + f"{graph.get(u'title', u'')}." ) if graph.get(u"include", None): data = input_data.filter_tests_by_name( - graph, continue_on_error=True + graph, + params=[u"type", u"result", u"tags"], + continue_on_error=True ) else: - data = input_data.filter_data(graph, continue_on_error=True) + data = input_data.filter_data( + graph, + params=[u"type", u"result", u"tags"], + continue_on_error=True) if data is None or data.empty: logging.error(u"No data.") @@ -375,8 +390,10 @@ def _generate_all_charts(spec, input_data): if chart_data.get(test_name, None) is None: chart_data[test_name] = OrderedDict() try: - chart_data[test_name][int(index)] = \ - test[u"result"][u"receive-rate"] + chart_data[test_name][int(index)] = { + u"receive-rate": test[u"result"][u"receive-rate"], + u"receive-stdev": test[u"result"][u"receive-stdev"] + } chart_tags[test_name] = test.get(u"tags", None) except (KeyError, TypeError): pass @@ -385,9 +402,12 @@ def _generate_all_charts(spec, input_data): for tst_name, tst_data in chart_data.items(): tst_lst = list() for bld in builds_dict[job_name]: - itm = tst_data.get(int(bld), u'') + itm = tst_data.get(int(bld), dict()) # CSIT-1180: Itm will be list, compute stats. 
@@ -385,9 +402,12 @@ def _generate_all_charts(spec, input_data):
         for tst_name, tst_data in chart_data.items():
             tst_lst = list()
             for bld in builds_dict[job_name]:
-                itm = tst_data.get(int(bld), u'')
+                itm = tst_data.get(int(bld), dict())
                 # CSIT-1180: Itm will be list, compute stats.
-                tst_lst.append(str(itm))
+                try:
+                    tst_lst.append(str(itm.get(u"receive-rate", u"")))
+                except AttributeError:
+                    tst_lst.append(u"")
             csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
 
         # Generate traces:
@@ -402,13 +422,10 @@ def _generate_all_charts(spec, input_data):
             for tag in group:
                 for tst_name, test_data in chart_data.items():
                     if not test_data:
-                        logs.append(
-                            (u"WARNING", f"No data for the test {tst_name}")
-                        )
+                        logging.warning(f"No data for the test {tst_name}")
                         continue
                     if tag not in chart_tags[tst_name]:
                         continue
-                    message = f"index: {index}, test: {tst_name}"
                     try:
                         trace, rslt = _generate_trending_traces(
                             test_data,
@@ -418,10 +435,8 @@ def _generate_all_charts(spec, input_data):
                                 split(u'-')[2:-1]),
                             color=COLORS[index])
                     except IndexError:
-                        logs.append(
-                            (u"ERROR", f"Out of colors: {message}")
-                        )
-                        logging.error(f"Out of colors: {message}")
+                        logging.error(f"Out of colors: index: "
+                                      f"{index}, test: {tst_name}")
                         index += 1
                         continue
                     traces.extend(trace)
@@ -433,11 +448,8 @@ def _generate_all_charts(spec, input_data):
         else:
             for tst_name, test_data in chart_data.items():
                 if not test_data:
-                    logs.append(
-                        (u"WARNING", f"No data for the test {tst_name}")
-                    )
+                    logging.warning(f"No data for the test {tst_name}")
                     continue
-                message = f"index: {index}, test: {tst_name}"
                 try:
                     trace, rslt = _generate_trending_traces(
                         test_data,
@@ -447,8 +459,9 @@ def _generate_all_charts(spec, input_data):
                             tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                         color=COLORS[index])
                 except IndexError:
-                    logs.append((u"ERROR", f"Out of colors: {message}"))
-                    logging.error(f"Out of colors: {message}")
+                    logging.error(
+                        f"Out of colors: index: {index}, test: {tst_name}"
+                    )
                     index += 1
                     continue
                 traces.extend(trace)
@@ -506,25 +519,13 @@ def _generate_all_charts(spec, input_data):
                 f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
                 f"{spec.cpta[u'output-file-type']}")
 
-            logs.append((u"INFO", f"    Writing the file {name_file} ..."))
+            logging.info(f"    Writing the file {name_file} ...")
             plpl = plgo.Figure(data=traces, layout=layout)
             try:
                 ploff.plot(plpl, show_link=False, auto_open=False,
                            filename=name_file)
             except plerr.PlotlyEmptyDataError:
-                logs.append((u"WARNING", u"No data for the plot. Skipped."))
-
-        for level, line in logs:
-            if level == u"INFO":
-                logging.info(line)
-            elif level == u"ERROR":
-                logging.error(line)
-            elif level == u"DEBUG":
-                logging.debug(line)
-            elif level == u"CRITICAL":
-                logging.critical(line)
-            elif level == u"WARNING":
-                logging.warning(line)
+                logging.warning(u"No data for the plot. Skipped.")
 
         return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
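Note: in the CSV export above, itm is either the per-build dict stored by the
chart-data transformation or the empty dict default; the AttributeError guard
also tolerates non-mapping leftovers. A small worked example of the fallback
behaviour (entries are made up):

    # Hypothetical per-build entries, mirroring the fallback above.
    tst_data = {
        1440: {u"receive-rate": 9552811.0, u"receive-stdev": 12328.0},
        1441: u"9548312.0",  # a non-mapping leftover, has no .get()
    }
    tst_lst = list()
    for bld in (1440, 1441, 1442):  # build 1442 has no entry at all
        itm = tst_data.get(bld, dict())
        try:
            tst_lst.append(str(itm.get(u"receive-rate", u"")))
        except AttributeError:  # itm is not a mapping
            tst_lst.append(u"")
    print(u",".join(tst_lst))  # -> 9552811.0,,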
@@ -534,7 +535,7 @@ def _generate_all_charts(spec, input_data):
             builds_dict[job] = list()
         for build in spec.input[u"builds"][job]:
             status = build[u"status"]
-            if status not in (u"failed", u"not found", u"removed"):
+            if status not in (u"failed", u"not found", u"removed", None):
                 builds_dict[job].append(str(build[u"build"]))
 
     # Create "build ID": "date" dict:
@@ -556,18 +557,18 @@ def _generate_all_charts(spec, input_data):
 
     anomaly_classifications = dict()
 
-    # Create the header:
+    # Create the table header:
     csv_tables = dict()
     for job_name in builds_dict:
         if csv_tables.get(job_name, None) is None:
             csv_tables[job_name] = list()
-        header = u"Build Number:," + u",".join(builds_dict[job_name]) + u'\n'
+        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
         csv_tables[job_name].append(header)
         build_dates = [x[0] for x in build_info[job_name].values()]
-        header = u"Build Date:," + u",".join(build_dates) + u'\n'
+        header = f"Build Date:,{u','.join(build_dates)}\n"
        csv_tables[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
-        header = u"Version:," + u",".join(versions) + u'\n'
+        header = f"Version:,{u','.join(versions)}\n"
         csv_tables[job_name].append(header)
 
     for chart in spec.cpta[u"plots"]:
@@ -583,8 +584,8 @@ def _generate_all_charts(spec, input_data):
 
     # Write the tables:
     for job_name, csv_table in csv_tables.items():
-        file_name = spec.cpta[u"output-file"] + u"-" + job_name + u"-trending"
-        with open(f"{file_name}.csv", u"w") as file_handler:
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
             file_handler.writelines(csv_table)
 
         txt_table = None
@@ -610,7 +611,7 @@ def _generate_all_charts(spec, input_data):
                 )
                 line_nr += 1
             txt_table.align[u"Build Number:"] = u"l"
-        with open(f"{file_name}.txt", u"w") as txt_file:
+        with open(f"{file_name}.txt", u"wt") as txt_file:
             txt_file.write(str(txt_table))
 
     # Evaluate result:
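Note: the per-job trending CSV written above starts with three metadata rows
followed by one row per test. A minimal sketch of the header assembly,
mirroring the patched f-strings with made-up builds:

    # Made-up metadata for two builds of one job.
    builds = [u"1440", u"1441"]
    build_dates = [u"20200512 02:31", u"20200513 02:33"]
    versions = [u"20.05-rc0~725", u"20.05-rc0~728"]

    csv_table = list()
    csv_table.append(f"Build Number:,{u','.join(builds)}\n")
    csv_table.append(f"Build Date:,{u','.join(build_dates)}\n")
    csv_table.append(f"Version:,{u','.join(versions)}\n")
    # Each test then contributes one "<test name>,<rate>,<rate>,..." row.
    with open(u"example-trending.csv", u"wt") as file_handler:
        file_handler.writelines(csv_table)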