X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_CPTA.py;h=1e7719153fd7b1d3cccb0310fe2201a52bbdff88;hp=d4ac06d09fb858600a63ef5fa106f4823d4a6819;hb=0ca4a9ec1a8fc53a679b1c635a6e1b6afae0299d;hpb=2e63ef13b419da1198439617e66cb0f1cfe6be65

diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index d4ac06d09f..1e7719153f 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -22,10 +22,10 @@ import prettytable
 import plotly.offline as ploff
 import plotly.graph_objs as plgo
 import plotly.exceptions as plerr
-import pandas as pd
 
 from collections import OrderedDict
 from datetime import datetime
+from copy import deepcopy
 
 from utils import archive_input_data, execute_command, \
     classify_anomalies, Worker
@@ -44,11 +44,69 @@ THEME_OVERRIDES = """/* override table width restrictions */
 .wy-nav-content {
     max-width: 1200px !important;
 }
+.rst-content blockquote {
+    margin-left: 0px;
+    line-height: 18px;
+    margin-bottom: 0px;
+}
+.wy-menu-vertical a {
+    display: inline-block;
+    line-height: 18px;
+    padding: 0 2em;
+    display: block;
+    position: relative;
+    font-size: 90%;
+    color: #d9d9d9
+}
+.wy-menu-vertical li.current a {
+    color: gray;
+    border-right: solid 1px #c9c9c9;
+    padding: 0 3em;
+}
+.wy-menu-vertical li.toctree-l2.current > a {
+    background: #c9c9c9;
+    padding: 0 3em;
+}
+.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
+    display: block;
+    background: #c9c9c9;
+    padding: 0 4em;
+}
+.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
+    display: block;
+    background: #bdbdbd;
+    padding: 0 5em;
+}
+.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
+    color: #404040;
+    padding: 0 2em;
+    font-weight: bold;
+    position: relative;
+    background: #fcfcfc;
+    border: none;
+    border-top-width: medium;
+    border-bottom-width: medium;
+    border-top-style: none;
+    border-bottom-style: none;
+    border-top-color: currentcolor;
+    border-bottom-color: currentcolor;
+    padding-left: 2em -4px;
+}
 """
 
 COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
           "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
-          "Violet", "Blue", "Yellow"]
+          "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
+          "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
+          "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
+          "MediumSeaGreen", "SeaGreen", "LightSlateGrey",
+          "SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
+          "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
+          "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
+          "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
+          "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
+          "MediumSeaGreen", "SeaGreen", "LightSlateGrey"
+          ]
 
 
 def generate_cpta(spec, data):
@@ -67,7 +125,7 @@ def generate_cpta(spec, data):
     ret_code = _generate_all_charts(spec, data)
 
     cmd = HTML_BUILDER.format(
-        date=datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
+        date=datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'),
         working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
         build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
     execute_command(cmd)
@@ -116,23 +174,41 @@ def _generate_trending_traces(in_data, job_name, build_info,
     hover_text = list()
     xaxis = list()
     for idx in data_x:
+        date = build_info[job_name][str(idx)][0]
+        hover_str = ("date: {date}<br>"
+                     "value: {value:,}<br>"
+                     "{sut}-ref: {build}<br>"
+                     "csit-ref: mrr-{period}-build-{build_nr}<br>"
+                     "testbed: {testbed}")
         if "dpdk" in job_name:
-            hover_text.append("dpdk-ref: {0}<br>csit-ref: mrr-weekly-build-{1}".
-                              format(build_info[job_name][str(idx)][1].
-                                     rsplit('~', 1)[0], idx))
+            hover_text.append(hover_str.format(
+                date=date,
+                value=int(in_data[idx].avg),
+                sut="dpdk",
+                build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
+                period="weekly",
+                build_nr=idx,
+                testbed=build_info[job_name][str(idx)][2]))
        elif "vpp" in job_name:
-            hover_text.append("vpp-ref: {0}<br>csit-ref: mrr-daily-build-{1}".
-                              format(build_info[job_name][str(idx)][1].
-                                     rsplit('~', 1)[0], idx))
-        date = build_info[job_name][str(idx)][0]
+            hover_text.append(hover_str.format(
+                date=date,
+                value=int(in_data[idx].avg),
+                sut="vpp",
+                build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
+                period="daily",
+                build_nr=idx,
+                testbed=build_info[job_name][str(idx)][2]))
+
         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                               int(date[9:11]), int(date[12:])))
 
-    data_pd = pd.Series(data_y, index=xaxis)
+    data_pd = OrderedDict()
+    for key, value in zip(xaxis, data_y):
+        data_pd[key] = value
 
     anomaly_classification, avgs = classify_anomalies(data_pd)
 
-    anomalies = pd.Series()
+    anomalies = OrderedDict()
     anomalies_colors = list()
     anomalies_avgs = list()
     anomaly_color = {
@@ -141,11 +217,10 @@
         "progression": 1.0
     }
     if anomaly_classification:
-        for idx, item in enumerate(data_pd.items()):
+        for idx, (key, value) in enumerate(data_pd.iteritems()):
             if anomaly_classification[idx] in \
                     ("outlier", "regression", "progression"):
-                anomalies = anomalies.append(pd.Series([item[1], ],
-                                                       index=[item[0], ]))
+                anomalies[key] = value
                 anomalies_colors.append(
                     anomaly_color[anomaly_classification[idx]])
                 anomalies_avgs.append(avgs[idx])
@@ -155,7 +230,7 @@
 
     trace_samples = plgo.Scatter(
         x=xaxis,
-        y=data_y,
+        y=[y.avg for y in data_y],
         mode='markers',
         line={
             "width": 1
@@ -169,7 +244,7 @@
             "symbol": "circle",
         },
         text=hover_text,
-        hoverinfo="x+y+text+name"
+        hoverinfo="text"
     )
 
     traces = [trace_samples, ]
@@ -185,7 +260,9 @@
         },
         showlegend=False,
         legendgroup=name,
-        name='{name}-trend'.format(name=name)
+        name='{name}'.format(name=name),
+        text=["trend: {0:,}".format(int(avg)) for avg in avgs],
+        hoverinfo="text+name"
     )
     traces.append(trace_trend)
 
@@ -271,6 +348,7 @@
             return
 
         chart_data = dict()
+        chart_tags = dict()
         for job, job_data in data.iteritems():
             if job != job_name:
                 continue
@@ -280,7 +358,8 @@
                         chart_data[test_name] = OrderedDict()
                     try:
                         chart_data[test_name][int(index)] = \
-                            test["result"]["throughput"]
+                            test["result"]["receive-rate"]
+                        chart_tags[test_name] = test.get("tags", None)
                     except (KeyError, TypeError):
                         pass
 
@@ -289,39 +368,130 @@
             tst_lst = list()
             for bld in builds_dict[job_name]:
                 itm = tst_data.get(int(bld), '')
+                if not isinstance(itm, str):
+                    itm = itm.avg
                 tst_lst.append(str(itm))
             csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
+
         # Generate traces:
         traces = list()
-        win_size = 14
         index = 0
-        for test_name, test_data in chart_data.items():
-            if not test_data:
-                logs.append(("WARNING", "No data for the test '{0}'".
-                             format(test_name)))
-                continue
-            test_name = test_name.split('.')[-1]
-            trace, rslt = _generate_trending_traces(
-                test_data,
-                job_name=job_name,
-                build_info=build_info,
-                name='-'.join(test_name.split('-')[3:-1]),
-                color=COLORS[index])
-            traces.extend(trace)
-            res.append(rslt)
-            index += 1
+        groups = graph.get("groups", None)
+        visibility = list()
+
+        if groups:
+            for group in groups:
+                visible = list()
+                for tag in group:
+                    for test_name, test_data in chart_data.items():
+                        if not test_data:
+                            logs.append(("WARNING",
+                                         "No data for the test '{0}'".
+                                         format(test_name)))
+                            continue
+                        if tag in chart_tags[test_name]:
+                            message = "index: {index}, test: {test}".format(
+                                index=index, test=test_name)
+                            test_name = test_name.split('.')[-1]
+                            try:
+                                trace, rslt = _generate_trending_traces(
+                                    test_data,
+                                    job_name=job_name,
+                                    build_info=build_info,
+                                    name='-'.join(test_name.split('-')[2:-1]),
+                                    color=COLORS[index])
+                            except IndexError:
+                                message = "Out of colors: {}".format(message)
+                                logs.append(("ERROR", message))
+                                logging.error(message)
+                                index += 1
+                                continue
+                            traces.extend(trace)
+                            visible.extend([True for _ in range(len(trace))])
+                            res.append(rslt)
+                            index += 1
+                            break
+                visibility.append(visible)
+        else:
+            for test_name, test_data in chart_data.items():
+                if not test_data:
+                    logs.append(("WARNING", "No data for the test '{0}'".
+                                 format(test_name)))
+                    continue
+                message = "index: {index}, test: {test}".format(
+                    index=index, test=test_name)
+                test_name = test_name.split('.')[-1]
+                try:
+                    trace, rslt = _generate_trending_traces(
+                        test_data,
+                        job_name=job_name,
+                        build_info=build_info,
+                        name='-'.join(test_name.split('-')[2:-1]),
+                        color=COLORS[index])
+                except IndexError:
+                    message = "Out of colors: {}".format(message)
+                    logs.append(("ERROR", message))
+                    logging.error(message)
+                    index += 1
+                    continue
+                traces.extend(trace)
+                res.append(rslt)
+                index += 1
 
         if traces:
             # Generate the chart:
-            graph["layout"]["xaxis"]["title"] = \
-                graph["layout"]["xaxis"]["title"].format(job=job_name)
+            try:
+                layout = deepcopy(graph["layout"])
+            except KeyError as err:
+                logging.error("Finished with error: No layout defined")
+                logging.error(repr(err))
+                return
+            if groups:
+                show = list()
+                for i in range(len(visibility)):
+                    visible = list()
+                    for r in range(len(visibility)):
+                        for _ in range(len(visibility[r])):
+                            visible.append(i == r)
+                    show.append(visible)
+
+                buttons = list()
+                buttons.append(dict(
+                    label="All",
+                    method="update",
+                    args=[{"visible": [True for _ in range(len(show[0]))]}, ]
+                ))
+                for i in range(len(groups)):
+                    try:
+                        label = graph["group-names"][i]
+                    except (IndexError, KeyError):
+                        label = "Group {num}".format(num=i + 1)
+                    buttons.append(dict(
+                        label=label,
+                        method="update",
+                        args=[{"visible": show[i]}, ]
+                    ))
+
+                layout['updatemenus'] = list([
+                    dict(
+                        active=0,
+                        type="dropdown",
+                        direction="down",
+                        xanchor="left",
+                        yanchor="bottom",
+                        x=-0.12,
+                        y=1.0,
+                        buttons=buttons
+                    )
+                ])
+
             name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
                                             graph["output-file-name"],
                                             spec.cpta["output-file-type"])
 
             logs.append(("INFO", "    Writing the file '{0}' ...".
                          format(name_file)))
-            plpl = plgo.Figure(data=traces, layout=graph["layout"])
+            plpl = plgo.Figure(data=traces, layout=layout)
             try:
                 ploff.plot(plpl, show_link=False, auto_open=False,
                            filename=name_file)
@@ -342,18 +512,25 @@
             builds_dict[job] = list()
         for build in spec.input["builds"][job]:
             status = build["status"]
-            if status != "failed" and status != "not found":
+            if status != "failed" and status != "not found" and \
+               status != "removed":
                 builds_dict[job].append(str(build["build"]))
 
     # Create "build ID": "date" dict:
     build_info = dict()
+    tb_tbl = spec.environment.get("testbeds", None)
     for job_name, job_data in builds_dict.items():
         if build_info.get(job_name, None) is None:
             build_info[job_name] = OrderedDict()
         for build in job_data:
+            testbed = ""
+            tb_ip = input_data.metadata(job_name, build).get("testbed", "")
+            if tb_ip and tb_tbl:
+                testbed = tb_tbl.get(tb_ip, "")
             build_info[job_name][build] = (
                 input_data.metadata(job_name, build).get("generated", ""),
-                input_data.metadata(job_name, build).get("version", "")
+                input_data.metadata(job_name, build).get("version", ""),
+                testbed
            )
 
     work_queue = multiprocessing.JoinableQueue()
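
For context on the "updatemenus" dropdown wired up in the patch above: the following standalone sketch, which is not part of the patch, illustrates the same per-group visibility-mask pattern with the plotly offline API this script uses. The traces, button labels and output file name below are invented for illustration only.

# Illustrative sketch only: mirrors the dropdown/visibility pattern added in
# _generate_chart above; traces and file name here are hypothetical.
import plotly.offline as ploff
import plotly.graph_objs as plgo

trace_a = plgo.Scatter(x=[1, 2, 3], y=[10, 11, 9], name="group-a-test")
trace_b = plgo.Scatter(x=[1, 2, 3], y=[20, 19, 21], name="group-b-test")

# One boolean mask per button: "All" shows every trace, the group buttons
# show only the traces that belong to the selected group.
buttons = [
    dict(label="All", method="update", args=[{"visible": [True, True]}, ]),
    dict(label="Group 1", method="update", args=[{"visible": [True, False]}, ]),
    dict(label="Group 2", method="update", args=[{"visible": [False, True]}, ]),
]

layout = plgo.Layout(updatemenus=[dict(active=0, type="dropdown",
                                       direction="down", buttons=buttons)])
ploff.plot(plgo.Figure(data=[trace_a, trace_b], layout=layout),
           show_link=False, auto_open=False, filename="example-dropdown.html")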