X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_cpta.py;h=5cc56fd9655d994dd95f4154808a5bd2e9388477;hp=c9bc44c460202f8c85c8a7d89dd34abda1af616a;hb=3b5495d0943283e96ce5fa23c0b1f31846ca0f6f;hpb=190462e1f242b59d927eff3e63826fe6343eadbc diff --git a/resources/tools/presentation/generator_cpta.py b/resources/tools/presentation/generator_cpta.py index c9bc44c460..5cc56fd965 100644 --- a/resources/tools/presentation/generator_cpta.py +++ b/resources/tools/presentation/generator_cpta.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019 Cisco and/or its affiliates. +# Copyright (c) 2021 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -14,6 +14,7 @@ """Generation of Continuous Performance Trending and Analysis. """ +import re import logging import csv @@ -30,7 +31,7 @@ from pal_utils import archive_input_data, execute_command, classify_anomalies # Command to build the html format of the report -HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \ +HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \ u'-b html -E ' \ u'-t html ' \ u'-D version="{date}" ' \ @@ -92,20 +93,31 @@ THEME_OVERRIDES = u"""/* override table width restrictions */ } """ -COLORS = [ - u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink", - u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black", - u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson", - u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod", - u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon", - u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey", - u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink", - u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black", - u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson", - u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod", - u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon", - u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey" -] +COLORS = ( + u"#1A1110", + u"#DA2647", + u"#214FC6", + u"#01786F", + u"#BD8260", + u"#FFD12A", + u"#A6E7FF", + u"#738276", + u"#C95A49", + u"#FC5A8D", + u"#CEC8EF", + u"#391285", + u"#6F2DA8", + u"#FF878D", + u"#45A27D", + u"#FFD0B9", + u"#FD5240", + u"#DB91EF", + u"#44D7A8", + u"#4F86F7", + u"#84DE02", + u"#FFCFF1", + u"#614051" +) def generate_cpta(spec, data): @@ -137,7 +149,7 @@ def generate_cpta(spec, data): css_file: css_file.write(THEME_OVERRIDES) - if spec.configuration.get(u"archive-inputs", True): + if spec.environment.get(u"archive-inputs", False): archive_input_data(spec) logging.info(u"Done.") @@ -146,7 +158,7 @@ def generate_cpta(spec, data): def _generate_trending_traces(in_data, job_name, build_info, - show_trend_line=True, name=u"", color=u""): + name=u"", color=u"", incl_tests=u"mrr"): """Generate the trending traces: - samples, - outliers, regress, progress @@ -155,58 +167,104 @@ def _generate_trending_traces(in_data, job_name, build_info, :param in_data: Full data set. :param job_name: The name of job which generated the data. :param build_info: Information about the builds. - :param show_trend_line: Show moving median (trending plot). :param name: Name of the plot :param color: Name of the color for the plot. 
+    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr, pdr-lat
     :type in_data: OrderedDict
     :type job_name: str
     :type build_info: dict
-    :type show_trend_line: bool
     :type name: str
     :type color: str
+    :type incl_tests: str
     :returns: Generated traces (list) and the evaluated result.
     :rtype: tuple(traces, result)
     """
 
-    data_x = list(in_data.keys())
-    data_y = [float(item) / 1e6 for item in in_data.values()]
+    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
+        return list(), None
 
+    data_x = list(in_data.keys())
+    data_y_pps = list()
+    data_y_mpps = list()
+    data_y_stdev = list()
+    if incl_tests == u"pdr-lat":
+        for item in in_data.values():
+            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
+            data_y_stdev.append(float(u"nan"))
+            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
+        multi = 1.0
+    else:
+        for item in in_data.values():
+            data_y_pps.append(float(item[u"receive-rate"]))
+            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
+            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
+        multi = 1e6
     hover_text = list()
     xaxis = list()
-    for idx in data_x:
-        date = build_info[job_name][str(idx)][0]
+    for index, key in enumerate(data_x):
+        str_key = str(key)
+        date = build_info[job_name][str_key][0]
         hover_str = (u"date: {date}<br>"
-                     u"value: {value:,}<br>"
+                     u"{property} [Mpps]: <val><br>"
+                     u"<stdev>"
                      u"{sut}-ref: {build}<br>"
-                     u"csit-ref: mrr-{period}-build-{build_nr}<br>"
+                     u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                      u"testbed: {testbed}")
+        if incl_tests == u"mrr":
+            hover_str = hover_str.replace(
+                u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
+            )
+        else:
+            hover_str = hover_str.replace(u"<stdev>", u"")
+        if incl_tests == u"pdr-lat":
+            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
+        else:
+            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
+        if u"-cps" in name:
+            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
+                replace(u"throughput", u"connection rate")
         if u"dpdk" in job_name:
-            hover_text.append(hover_str.format(
+            hover_str = hover_str.format(
                 date=date,
-                value=int(in_data[idx]),
+                property=u"average" if incl_tests == u"mrr" else u"throughput",
+                value=data_y_mpps[index],
                 sut=u"dpdk",
-                build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0],
+                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
+                test=incl_tests,
                 period=u"weekly",
-                build_nr=idx,
-                testbed=build_info[job_name][str(idx)][2]))
+                build_nr=str_key,
+                testbed=build_info[job_name][str_key][2])
         elif u"vpp" in job_name:
-            hover_text.append(hover_str.format(
+            hover_str = hover_str.format(
                 date=date,
-                value=int(in_data[idx]),
+                property=u"average" if incl_tests == u"mrr" else u"throughput",
+                value=data_y_mpps[index],
                 sut=u"vpp",
-                build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0],
-                period=u"daily",
-                build_nr=idx,
-                testbed=build_info[job_name][str(idx)][2]))
-
+                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
+                test=incl_tests,
+                period=u"daily" if incl_tests == u"mrr" else u"weekly",
+                build_nr=str_key,
+                testbed=build_info[job_name][str_key][2])
+        if incl_tests == u"pdr-lat":
+            hover_str = hover_str.replace(
+                u"throughput [Mpps]", u"latency [s]"
+            )
+        hover_text.append(hover_str)
         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                               int(date[9:11]), int(date[12:])))
 
     data_pd = OrderedDict()
-    for key, value in zip(xaxis, data_y):
+    for key, value in zip(xaxis, data_y_pps):
         data_pd[key] = value
 
-    anomaly_classification, avgs = classify_anomalies(data_pd)
+    try:
+        anomaly_classification, avgs_pps, stdevs_pps = \
+            classify_anomalies(data_pd)
+    except ValueError as err:
+        logging.info(f"{err} Skipping")
+        return list(), None
+    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
+    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]
 
     anomalies = OrderedDict()
     anomalies_colors = list()
@@ -217,20 +275,19 @@ def _generate_trending_traces(in_data, job_name, build_info,
         u"progression": 1.0
     }
     if anomaly_classification:
-        for idx, (key, value) in enumerate(data_pd.items()):
-            if anomaly_classification[idx] in \
-                    (u"outlier", u"regression", u"progression"):
-                anomalies[key] = value
+        for index, (key, value) in enumerate(data_pd.items()):
+            if anomaly_classification[index] in (u"regression", u"progression"):
+                anomalies[key] = value / multi
                 anomalies_colors.append(
-                    anomaly_color[anomaly_classification[idx]])
-                anomalies_avgs.append(avgs[idx])
+                    anomaly_color[anomaly_classification[index]])
+                anomalies_avgs.append(avgs_mpps[index])
         anomalies_colors.extend([0.0, 0.5, 1.0])
 
     # Create traces
     trace_samples = plgo.Scatter(
         x=xaxis,
-        y=data_y,
+        y=data_y_mpps,
         mode=u"markers",
         line={
             u"width": 1
@@ -244,28 +301,60 @@ def _generate_trending_traces(in_data, job_name, build_info,
            u"symbol": u"circle",
         },
        text=hover_text,
-        hoverinfo=u"text"
+        hoverinfo=u"text+name"
     )
     traces = [trace_samples, ]
 
-    if show_trend_line:
-        trace_trend = plgo.Scatter(
-            x=xaxis,
-            y=avgs,
-            mode=u"lines",
-            line={
-                u"shape": u"linear",
-                u"width": 1,
-                u"color": color,
-            },
-            showlegend=False,
-            legendgroup=name,
-            name=f"{name}",
-            text=[f"trend: {int(avg):,}" for avg in avgs],
-            hoverinfo=u"text+name"
-        )
-        traces.append(trace_trend)
+    trend_hover_text = list()
+    for idx in range(len(data_x)):
+        if incl_tests == u"pdr-lat":
+            trend_hover_str = (
+                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
+            )
+        else:
+            trend_hover_str = (
+                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
+                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
+            )
+        trend_hover_text.append(trend_hover_str)
+
+    trace_trend = plgo.Scatter(
+        x=xaxis,
+        y=avgs_mpps,
+        mode=u"lines",
+        line={
+            u"shape": u"linear",
+            u"width": 1,
+            u"color": color,
+        },
+        showlegend=False,
+        legendgroup=name,
+        name=f"{name}",
+        text=trend_hover_text,
+        hoverinfo=u"text+name"
+    )
+    traces.append(trace_trend)
+
+    if incl_tests == u"pdr-lat":
+        colorscale = [
+            [0.00, u"green"],
+            [0.33, u"green"],
+            [0.33, u"white"],
+            [0.66, u"white"],
+            [0.66, u"red"],
+            [1.00, u"red"]
+        ]
+        ticktext = [u"Progression", u"Normal", u"Regression"]
+    else:
+        colorscale = [
+            [0.00, u"red"],
+            [0.33, u"red"],
+            [0.33, u"white"],
+            [0.66, u"white"],
+            [0.66, u"green"],
+            [1.00, u"green"]
+        ]
+        ticktext = [u"Regression", u"Normal", u"Progression"]
 
     trace_anomalies = plgo.Scatter(
         x=list(anomalies.keys()),
         y=anomalies_avgs,
@@ -278,14 +367,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
             u"size": 15,
             u"symbol": u"circle-open",
             u"color": anomalies_colors,
-            u"colorscale": [
-                [0.00, u"red"],
-                [0.33, u"red"],
-                [0.33, u"white"],
-                [0.66, u"white"],
-                [0.66, u"green"],
-                [1.00, u"green"]
-            ],
+            u"colorscale": colorscale,
             u"showscale": True,
             u"line": {
                 u"width": 2
@@ -300,7 +382,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
                 },
                 u"tickmode": u"array",
                 u"tickvals": [0.167, 0.500, 0.833],
-                u"ticktext": [u"Regression", u"Normal", u"Progression"],
+                u"ticktext": ticktext,
                 u"ticks": u"",
                 u"ticklen": 0,
                 u"tickangle": -90,
@@ -335,206 +417,306 @@ def _generate_all_charts(spec, input_data):
         :rtype: dict
         """
 
-        logs = list()
-
-        logs.append(
-            (u"INFO", f"  Generating the chart {graph.get(u'title', u'')} ...")
-        )
+        logging.info(f"  Generating the chart {graph.get(u'title', u'')} ...")
 
         job_name = list(graph[u"data"].keys())[0]
 
-        csv_tbl = list()
-        res = dict()
-
         # Transform the data
-        logs.append(
-            (u"INFO",
-             f"    Creating the data set for the {graph.get(u'type', u'')} "
-             f"{graph.get(u'title', u'')}."
-            )
+        logging.info(
+            f"    Creating the data set for the {graph.get(u'type', u'')} "
+            f"{graph.get(u'title', u'')}."
         )
 
-        if graph.get(u"include", None):
-            data = input_data.filter_tests_by_name(
-                graph, continue_on_error=True
-            )
-        else:
-            data = input_data.filter_data(graph, continue_on_error=True)
+        data = input_data.filter_tests_by_name(
+            graph,
+            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
+            continue_on_error=True
+        )
 
         if data is None or data.empty:
             logging.error(u"No data.")
             return dict()
 
-        chart_data = dict()
-        chart_tags = dict()
-        for job, job_data in data.items():
-            if job != job_name:
-                continue
-            for index, bld in job_data.items():
-                for test_name, test in bld.items():
-                    if chart_data.get(test_name, None) is None:
-                        chart_data[test_name] = OrderedDict()
-                    try:
-                        chart_data[test_name][int(index)] = \
-                            test[u"result"][u"receive-rate"]
-                        chart_tags[test_name] = test.get(u"tags", None)
-                    except (KeyError, TypeError):
-                        pass
-
-        # Add items to the csv table:
-        for tst_name, tst_data in chart_data.items():
-            tst_lst = list()
-            for bld in builds_dict[job_name]:
-                itm = tst_data.get(int(bld), u'')
-                # CSIT-1180: Itm will be list, compute stats.
- tst_lst.append(str(itm)) - csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n') - - # Generate traces: - traces = list() - index = 0 - groups = graph.get(u"groups", None) - visibility = list() - - if groups: - for group in groups: - visible = list() - for tag in group: + return_lst = list() + + for ttype in graph.get(u"test-type", (u"mrr", )): + for core in graph.get(u"core", tuple()): + csv_tbl = list() + csv_tbl_lat_1 = list() + csv_tbl_lat_2 = list() + res = dict() + chart_data = dict() + chart_tags = dict() + for item in graph.get(u"include", tuple()): + reg_ex = re.compile(str(item.format(core=core)).lower()) + for job, job_data in data.items(): + if job != job_name: + continue + for index, bld in job_data.items(): + for test_id, test in bld.items(): + if not re.match(reg_ex, str(test_id).lower()): + continue + if chart_data.get(test_id, None) is None: + chart_data[test_id] = OrderedDict() + try: + lat_1 = u"" + lat_2 = u"" + if ttype == u"mrr": + rate = test[u"result"][u"receive-rate"] + stdev = \ + test[u"result"][u"receive-stdev"] + elif ttype == u"ndr": + rate = \ + test["throughput"][u"NDR"][u"LOWER"] + stdev = float(u"nan") + elif ttype == u"pdr": + rate = \ + test["throughput"][u"PDR"][u"LOWER"] + stdev = float(u"nan") + lat_1 = test[u"latency"][u"PDR50"]\ + [u"direction1"][u"avg"] + lat_2 = test[u"latency"][u"PDR50"]\ + [u"direction2"][u"avg"] + else: + continue + chart_data[test_id][int(index)] = { + u"receive-rate": rate, + u"receive-stdev": stdev + } + if ttype == u"pdr": + chart_data[test_id][int(index)].update( + { + u"lat_1": lat_1, + u"lat_2": lat_2 + } + ) + chart_tags[test_id] = \ + test.get(u"tags", None) + except (KeyError, TypeError): + pass + + # Add items to the csv table: + for tst_name, tst_data in chart_data.items(): + tst_lst = list() + tst_lst_lat_1 = list() + tst_lst_lat_2 = list() + for bld in builds_dict[job_name]: + itm = tst_data.get(int(bld), dict()) + # CSIT-1180: Itm will be list, compute stats. + try: + tst_lst.append(str(itm.get(u"receive-rate", u""))) + if ttype == u"pdr": + tst_lst_lat_1.append( + str(itm.get(u"lat_1", u"")) + ) + tst_lst_lat_2.append( + str(itm.get(u"lat_2", u"")) + ) + except AttributeError: + tst_lst.append(u"") + if ttype == u"pdr": + tst_lst_lat_1.append(u"") + tst_lst_lat_2.append(u"") + csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n') + csv_tbl_lat_1.append( + f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n" + ) + csv_tbl_lat_2.append( + f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n" + ) + + # Generate traces: + traces = list() + traces_lat = list() + index = 0 + groups = graph.get(u"groups", None) + visibility = list() + + if groups: + for group in groups: + visible = list() + for tag in group: + for tst_name, test_data in chart_data.items(): + if not test_data: + logging.warning( + f"No data for the test {tst_name}" + ) + continue + if tag not in chart_tags[tst_name]: + continue + try: + trace, rslt = _generate_trending_traces( + test_data, + job_name=job_name, + build_info=build_info, + name=u'-'.join(tst_name.split(u'.')[-1]. 
+                                                       split(u'-')[2:-1]),
+                                        color=COLORS[index],
+                                        incl_tests=ttype
+                                    )
+                                except IndexError:
+                                    logging.error(f"Out of colors: index: "
+                                                  f"{index}, test: {tst_name}")
+                                    index += 1
+                                    continue
+                                traces.extend(trace)
+                                visible.extend(
+                                    [True for _ in range(len(trace))]
+                                )
+                                res[tst_name] = rslt
+                                index += 1
+                                break
+                        visibility.append(visible)
+                else:
                     for tst_name, test_data in chart_data.items():
                         if not test_data:
-                            logs.append(
-                                (u"WARNING", f"No data for the test {tst_name}")
-                            )
+                            logging.warning(f"No data for the test {tst_name}")
                             continue
-                        if tag not in chart_tags[tst_name]:
-                            continue
-                        message = f"index: {index}, test: {tst_name}"
                         try:
                             trace, rslt = _generate_trending_traces(
                                 test_data,
                                 job_name=job_name,
                                 build_info=build_info,
-                                name=u'-'.join(tst_name.split(u'.')[-1].
-                                               split(u'-')[2:-1]),
-                                color=COLORS[index])
+                                name=u'-'.join(
+                                    tst_name.split(u'.')[-1].split(u'-')[2:-1]),
+                                color=COLORS[index],
+                                incl_tests=ttype
+                            )
+                            if ttype == u"pdr":
+                                trace_lat, _ = _generate_trending_traces(
+                                    test_data,
+                                    job_name=job_name,
+                                    build_info=build_info,
+                                    name=u'-'.join(
+                                        tst_name.split(u'.')[-1].split(
+                                            u'-')[2:-1]),
+                                    color=COLORS[index],
+                                    incl_tests=u"pdr-lat"
+                                )
+                                traces_lat.extend(trace_lat)
                         except IndexError:
-                            logs.append(
-                                (u"ERROR", f"Out of colors: {message}")
+                            logging.error(
+                                f"Out of colors: index: "
+                                f"{index}, test: {tst_name}"
                             )
-                            logging.error(f"Out of colors: {message}")
                             index += 1
                             continue
                         traces.extend(trace)
-                        visible.extend([True for _ in range(len(trace))])
                         res[tst_name] = rslt
                         index += 1
-                        break
-                    visibility.append(visible)
-        else:
-            for tst_name, test_data in chart_data.items():
-                if not test_data:
-                    logs.append(
-                        (u"WARNING", f"No data for the test {tst_name}")
-                    )
-                    continue
-                message = f"index: {index}, test: {tst_name}"
-                try:
-                    trace, rslt = _generate_trending_traces(
-                        test_data,
-                        job_name=job_name,
-                        build_info=build_info,
-                        name=u'-'.join(
-                            tst_name.split(u'.')[-1].split(u'-')[2:-1]),
-                        color=COLORS[index])
-                except IndexError:
-                    logs.append((u"ERROR", f"Out of colors: {message}"))
-                    logging.error(f"Out of colors: {message}")
-                    index += 1
-                    continue
-                traces.extend(trace)
-                res[tst_name] = rslt
-                index += 1
-
-        if traces:
-            # Generate the chart:
-            try:
-                layout = deepcopy(graph[u"layout"])
-            except KeyError as err:
-                logging.error(u"Finished with error: No layout defined")
-                logging.error(repr(err))
-                return dict()
-            if groups:
-                show = list()
-                for i in range(len(visibility)):
-                    visible = list()
-                    for vis_idx, _ in enumerate(visibility):
-                        for _ in range(len(visibility[vis_idx])):
-                            visible.append(i == vis_idx)
-                    show.append(visible)
-
-                buttons = list()
-                buttons.append(dict(
-                    label=u"All",
-                    method=u"update",
-                    args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
-                ))
-                for i in range(len(groups)):
-                    try:
-                        label = graph[u"group-names"][i]
-                    except (IndexError, KeyError):
-                        label = f"Group {i + 1}"
-                    buttons.append(dict(
-                        label=label,
-                        method=u"update",
-                        args=[{u"visible": show[i]}, ]
-                    ))
-
-                layout[u"updatemenus"] = list([
-                    dict(
-                        active=0,
-                        type=u"dropdown",
-                        direction=u"down",
-                        xanchor=u"left",
-                        yanchor=u"bottom",
-                        x=-0.12,
-                        y=1.0,
-                        buttons=buttons
-                    )
-                ])
-
-            name_file = (
-                f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
-                f"{spec.cpta[u'output-file-type']}")
-
-            logs.append((u"INFO", f"    Writing the file {name_file} ..."))
-            plpl = plgo.Figure(data=traces, layout=layout)
-            try:
-                ploff.plot(plpl, show_link=False, auto_open=False,
-                           filename=name_file)
-            except plerr.PlotlyEmptyDataError:
-                logs.append((u"WARNING", u"No data for the plot. Skipped."))
-
-        for level, line in logs:
-            if level == u"INFO":
-                logging.info(line)
-            elif level == u"ERROR":
-                logging.error(line)
-            elif level == u"DEBUG":
-                logging.debug(line)
-            elif level == u"CRITICAL":
-                logging.critical(line)
-            elif level == u"WARNING":
-                logging.warning(line)
-
-        return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
+
+                if traces:
+                    # Generate the chart:
+                    try:
+                        layout = deepcopy(graph[u"layout"])
+                    except KeyError as err:
+                        logging.error(u"Finished with error: No layout defined")
+                        logging.error(repr(err))
+                        return dict()
+                    if groups:
+                        show = list()
+                        for i in range(len(visibility)):
+                            visible = list()
+                            for vis_idx, _ in enumerate(visibility):
+                                for _ in range(len(visibility[vis_idx])):
+                                    visible.append(i == vis_idx)
+                            show.append(visible)
+
+                        buttons = list()
+                        buttons.append(dict(
+                            label=u"All",
+                            method=u"update",
+                            args=[{u"visible":
+                                       [True for _ in range(len(show[0]))]}, ]
+                        ))
+                        for i in range(len(groups)):
+                            try:
+                                label = graph[u"group-names"][i]
+                            except (IndexError, KeyError):
+                                label = f"Group {i + 1}"
+                            buttons.append(dict(
+                                label=label,
+                                method=u"update",
+                                args=[{u"visible": show[i]}, ]
+                            ))
+
+                        layout[u"updatemenus"] = list([
+                            dict(
+                                active=0,
+                                type=u"dropdown",
+                                direction=u"down",
+                                xanchor=u"left",
+                                yanchor=u"bottom",
+                                x=-0.12,
+                                y=1.0,
+                                buttons=buttons
+                            )
+                        ])
+
+                    name_file = (
+                        f"{spec.cpta[u'output-file']}/"
+                        f"{graph[u'output-file-name']}.html"
+                    )
+                    name_file = name_file.format(core=core, test_type=ttype)
+
+                    logging.info(f"    Writing the file {name_file}")
+                    plpl = plgo.Figure(data=traces, layout=layout)
+                    try:
+                        ploff.plot(
+                            plpl,
+                            show_link=False,
+                            auto_open=False,
+                            filename=name_file
+                        )
+                    except plerr.PlotlyEmptyDataError:
+                        logging.warning(u"No data for the plot. Skipped.")
+
+                if traces_lat:
+                    try:
+                        layout = deepcopy(graph[u"layout"])
+                        layout[u"yaxis"][u"title"] = u"Latency [s]"
+                        layout[u"yaxis"][u"tickformat"] = u".3s"
+                    except KeyError as err:
+                        logging.error(u"Finished with error: No layout defined")
+                        logging.error(repr(err))
+                        return dict()
+                    name_file = (
+                        f"{spec.cpta[u'output-file']}/"
+                        f"{graph[u'output-file-name']}-lat.html"
+                    )
+                    name_file = name_file.format(core=core, test_type=ttype)
+
+                    logging.info(f"    Writing the file {name_file}")
+                    plpl = plgo.Figure(data=traces_lat, layout=layout)
+                    try:
+                        ploff.plot(
+                            plpl,
+                            show_link=False,
+                            auto_open=False,
+                            filename=name_file
+                        )
+                    except plerr.PlotlyEmptyDataError:
+                        logging.warning(u"No data for the plot. Skipped.")
+
+                return_lst.append(
+                    {
+                        u"job_name": job_name,
+                        u"csv_table": csv_tbl,
+                        u"csv_lat_1": csv_tbl_lat_1,
+                        u"csv_lat_2": csv_tbl_lat_2,
+                        u"results": res
+                    }
+                )
+
+        return return_lst
 
     builds_dict = dict()
-    for job in spec.input[u"builds"].keys():
+    for job, builds in spec.input.items():
         if builds_dict.get(job, None) is None:
             builds_dict[job] = list()
-        for build in spec.input[u"builds"][job]:
-            status = build[u"status"]
-            if status not in (u"failed", u"not found", u"removed"):
+        for build in builds:
+            if build[u"status"] not in (u"failed", u"not found", u"removed",
+                                        None):
                 builds_dict[job].append(str(build[u"build"]))
 
     # Create "build ID": "date" dict:
@@ -556,35 +738,56 @@ def _generate_all_charts(spec, input_data):
 
     anomaly_classifications = dict()
 
-    # Create the header:
+    # Create the table header:
     csv_tables = dict()
+    csv_tables_l1 = dict()
+    csv_tables_l2 = dict()
     for job_name in builds_dict:
         if csv_tables.get(job_name, None) is None:
             csv_tables[job_name] = list()
-        header = u"Build Number:," + u",".join(builds_dict[job_name]) + u'\n'
+        if csv_tables_l1.get(job_name, None) is None:
+            csv_tables_l1[job_name] = list()
+        if csv_tables_l2.get(job_name, None) is None:
+            csv_tables_l2[job_name] = list()
+        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
         build_dates = [x[0] for x in build_info[job_name].values()]
-        header = u"Build Date:," + u",".join(build_dates) + u'\n'
+        header = f"Build Date:,{u','.join(build_dates)}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
         versions = [x[1] for x in build_info[job_name].values()]
-        header = u"Version:," + u",".join(versions) + u'\n'
+        header = f"Version:,{u','.join(versions)}\n"
+        csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
+        testbed = [x[2] for x in build_info[job_name].values()]
+        header = f"Test bed:,{u','.join(testbed)}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
 
     for chart in spec.cpta[u"plots"]:
-        result = _generate_chart(chart)
-        if not result:
+        results = _generate_chart(chart)
+        if not results:
             continue
 
-        csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
+        for result in results:
+            csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
+            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
+            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])
 
-        if anomaly_classifications.get(result[u"job_name"], None) is None:
-            anomaly_classifications[result[u"job_name"]] = dict()
-        anomaly_classifications[result[u"job_name"]].update(result[u"results"])
+            if anomaly_classifications.get(result[u"job_name"], None) is None:
+                anomaly_classifications[result[u"job_name"]] = dict()
+            anomaly_classifications[result[u"job_name"]].\
+                update(result[u"results"])
 
     # Write the tables:
     for job_name, csv_table in csv_tables.items():
-        file_name = spec.cpta[u"output-file"] + u"-" + job_name + u"-trending"
-        with open(f"{file_name}.csv", u"w") as file_handler:
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
             file_handler.writelines(csv_table)
 
         txt_table = None
@@ -610,9 +813,18 @@ def _generate_all_charts(spec, input_data):
             )
             line_nr += 1
         txt_table.align[u"Build Number:"] = u"l"
-        with open(f"{file_name}.txt", u"w") as txt_file:
+        with open(f"{file_name}.txt", u"wt") as txt_file:
            txt_file.write(str(txt_table))
 
+    for job_name, csv_table in csv_tables_l1.items():
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
+            file_handler.writelines(csv_table)
+
+    for job_name, csv_table in csv_tables_l2.items():
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
+            file_handler.writelines(csv_table)
+
     # Evaluate result:
    if anomaly_classifications:
         result = u"PASS"