1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
25 import plotly.offline as ploff
26 import plotly.graph_objs as plgo
27 import plotly.exceptions as plerr
29 from pal_utils import archive_input_data, execute_command, classify_anomalies
32 # Command to build the html format of the report
33 HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
36 u'-D version="{date}" ' \
40 # .css file for the html format of the report
41 THEME_OVERRIDES = u"""/* override table width restrictions */
43 max-width: 1200px !important;
45 .rst-content blockquote {
51 display: inline-block;
59 .wy-menu-vertical li.current a {
61 border-right: solid 1px #c9c9c9;
64 .wy-menu-vertical li.toctree-l2.current > a {
68 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
73 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
78 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
85 border-top-width: medium;
86 border-bottom-width: medium;
87 border-top-style: none;
88 border-bottom-style: none;
89 border-top-color: currentcolor;
90 border-bottom-color: currentcolor;
91 padding-left: 2em -4px;
96 u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
97 u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
98 u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
99 u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
100 u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
101 u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey",
102 u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
103 u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
104 u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
105 u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
106 u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
107 u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey"
111 def generate_cpta(spec, data):
112     """Generate all formats and versions of the Continuous Performance Trending
115     :param spec: Specification read from the specification file.
116     :param data: Full data set.
117     :type spec: Specification
118     :type data: InputData
# Log the start of the whole CPTA generation run.
121     logging.info(u"Generating the Continuous Performance Trending and Analysis "
# Generate every chart defined in the specification; the returned code
# presumably reflects overall success/failure — TODO confirm how callers use it.
124     ret_code = _generate_all_charts(spec, data)
# Build the sphinx-build command for the HTML report; the current UTC
# timestamp is injected as the report "version" string.
126     cmd = HTML_BUILDER.format(
127         date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
128         working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
129         build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
# Overwrite both CSS patch files with the THEME_OVERRIDES stylesheet so the
# generated HTML report gets the custom table/menu styling.
132     with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
134         css_file.write(THEME_OVERRIDES)
136     with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
138         css_file.write(THEME_OVERRIDES)
# Archiving of input data is opt-out: it runs unless the specification
# explicitly sets "archive-inputs" to a falsy value.
140     if spec.configuration.get(u"archive-inputs", True):
141         archive_input_data(spec)
143     logging.info(u"Done.")
148 def _generate_trending_traces(in_data, job_name, build_info,
149                               name=u"", color=u""):
150     """Generate the trending traces:
152     - outliers, regress, progress
153     - average of normal samples (trending line)
155     :param in_data: Full data set.
156     :param job_name: The name of job which generated the data.
157     :param build_info: Information about the builds.
158     :param name: Name of the plot
159     :param color: Name of the color for the plot.
160     :type in_data: OrderedDict
162     :type build_info: dict
165     :returns: Generated traces (list) and the evaluated result.
166     :rtype: tuple(traces, result)
# Keys of in_data are build identifiers (x-axis); values carry per-build
# receive-rate/stdev results.
169     data_x = list(in_data.keys())
172     data_y_stdev = list()
# Collect sample values; rate and stdev are scaled from pps to Mpps (/1e6)
# for the displayed series.
173     for item in in_data.values():
174         data_y_pps.append(float(item[u"receive-rate"]))
175         data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
176         data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
# Build one hover string per sample, using build metadata (date, version,
# testbed) from build_info; dpdk and vpp jobs get differently-filled templates.
180     for index, key in enumerate(data_x):
182         date = build_info[job_name][str_key][0]
183         hover_str = (u"date: {date}<br>"
184                      u"average [Mpps]: {value:.3f}<br>"
185                      u"stdev [Mpps]: {stdev:.3f}<br>"
186                      u"{sut}-ref: {build}<br>"
187                      u"csit-ref: mrr-{period}-build-{build_nr}<br>"
188                      u"testbed: {testbed}")
189         if u"dpdk" in job_name:
190             hover_text.append(hover_str.format(
192                 value=data_y_mpps[index],
193                 stdev=data_y_stdev[index],
195                 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
198                 testbed=build_info[job_name][str_key][2]))
199         elif u"vpp" in job_name:
200             hover_text.append(hover_str.format(
202                 value=data_y_mpps[index],
203                 stdev=data_y_stdev[index],
205                 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
208                 testbed=build_info[job_name][str_key][2]))
# The build date is a fixed-width string; slices pick out year, month, day,
# hour and minute (e.g. "YYYYMMDD HH:MM"-like layout — TODO confirm format).
210         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
211                               int(date[9:11]), int(date[12:])))
# Re-key the raw pps series by datetime for anomaly classification.
213     data_pd = OrderedDict()
214     for key, value in zip(xaxis, data_y_pps):
# classify_anomalies returns per-sample classification plus trend averages
# and stdevs in pps; convert the latter two to Mpps for plotting.
217     anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd)
218     avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
219     stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
221     anomalies = OrderedDict()
222     anomalies_colors = list()
223     anomalies_avgs = list()
# Collect only regression/progression points; their marker color is looked
# up in anomaly_color (defined elsewhere in this file).
229     if anomaly_classification:
230         for index, (key, value) in enumerate(data_pd.items()):
231             if anomaly_classification[index] in (u"regression", u"progression"):
232                 anomalies[key] = value / 1e6
233                 anomalies_colors.append(
234                     anomaly_color[anomaly_classification[index]])
235                 anomalies_avgs.append(avgs_mpps[index])
# Append sentinel color values — presumably to pin the colorscale range so
# the colorbar always shows all three classes; TODO confirm.
236         anomalies_colors.extend([0.0, 0.5, 1.0])
# Scatter trace of the raw samples.
240     trace_samples = plgo.Scatter(
253             u"symbol": u"circle",
256         hoverinfo=u"text+name"
258     traces = [trace_samples, ]
# Hover text for the trend line: trend average and stdev per sample.
260     trend_hover_text = list()
261     for idx in range(len(data_x)):
263             f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
264             f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
266         trend_hover_text.append(trend_hover_str)
# Trend-line trace built from the classified averages.
268     trace_trend = plgo.Scatter(
280         text=trend_hover_text,
281         hoverinfo=u"text+name"
283     traces.append(trace_trend)
# Anomaly markers: open circles colored by classification, with a colorbar
# legend mapping thirds of the scale to Regression/Normal/Progression.
285     trace_anomalies = plgo.Scatter(
286         x=list(anomalies.keys()),
292         name=f"{name}-anomalies",
295             u"symbol": u"circle-open",
296             u"color": anomalies_colors,
312                 u"title": u"Circles Marking Data Classification",
313                 u"titleside": u"right",
317                 u"tickmode": u"array",
318                 u"tickvals": [0.167, 0.500, 0.833],
319                 u"ticktext": [u"Regression", u"Normal", u"Progression"],
327     traces.append(trace_anomalies)
# The classification of the most recent sample is reported as the overall
# result for this test.
329     if anomaly_classification:
330         return traces, anomaly_classification[-1]
335 def _generate_all_charts(spec, input_data):
336     """Generate all charts specified in the specification file.
338     :param spec: Specification.
339     :param input_data: Full data set.
340     :type spec: Specification
341     :type input_data: InputData
# Nested helper: builds a single chart; closes over input_data, builds_dict
# and build_info from the enclosing scope.
344     def _generate_chart(graph):
345         """Generates the chart.
347         :param graph: The graph to be generated
349         :returns: Dictionary with the job name, csv table with results and
350             list of tests classification results.
354         logging.info(f"    Generating the chart {graph.get(u'title', u'')} ...")
# The first job listed in the graph's "data" section names this chart's job.
356         job_name = list(graph[u"data"].keys())[0]
363             f"    Creating the data set for the {graph.get(u'type', u'')} "
364             f"{graph.get(u'title', u'')}."
# Select tests either by explicit include-list or by filter expression.
367         if graph.get(u"include", None):
368             data = input_data.filter_tests_by_name(
370                 params=[u"type", u"result", u"tags"],
371                 continue_on_error=True
374             data = input_data.filter_data(
376                 params=[u"type", u"result", u"tags"],
377                 continue_on_error=True)
# Guard: nothing to chart if the filter produced no data.
379         if data is None or data.empty:
380             logging.error(u"No data.")
# Re-shape the filtered data into per-test, per-build dictionaries of
# receive-rate/stdev; malformed entries are skipped via the except below.
385         for job, job_data in data.items():
388             for index, bld in job_data.items():
389                 for test_name, test in bld.items():
390                     if chart_data.get(test_name, None) is None:
391                         chart_data[test_name] = OrderedDict()
393                     chart_data[test_name][int(index)] = {
394                         u"receive-rate": test[u"result"][u"receive-rate"],
395                         u"receive-stdev": test[u"result"][u"receive-stdev"]
397                     chart_tags[test_name] = test.get(u"tags", None)
398                 except (KeyError, TypeError):
401         # Add items to the csv table:
402         for tst_name, tst_data in chart_data.items():
404             for bld in builds_dict[job_name]:
405                 itm = tst_data.get(int(bld), dict())
406                 # CSIT-1180: Itm will be list, compute stats.
408                     tst_lst.append(str(itm.get(u"receive-rate", u"")))
409                 except AttributeError:
411             csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
# Optional grouping of tests by tag; the grouped branch builds a per-group
# visibility mask, the ungrouped branch below charts every test directly.
416         groups = graph.get(u"groups", None)
423                 for tst_name, test_data in chart_data.items():
425                         logging.warning(f"No data for the test {tst_name}")
427                         if tag not in chart_tags[tst_name]:
430                         trace, rslt = _generate_trending_traces(
433                             build_info=build_info,
434                             name=u'-'.join(tst_name.split(u'.')[-1].
# Colors come from a fixed palette; running out of colors is logged as an
# error for the offending test.
438                         logging.error(f"Out of colors: index: "
439                                       f"{index}, test: {tst_name}")
443                         visible.extend([True for _ in range(len(trace))])
447                 visibility.append(visible)
# Ungrouped path: one set of traces per test.
449             for tst_name, test_data in chart_data.items():
451                     logging.warning(f"No data for the test {tst_name}")
454                 trace, rslt = _generate_trending_traces(
457                     build_info=build_info,
459                         tst_name.split(u'.')[-1].split(u'-')[2:-1]),
463                         f"Out of colors: index: {index}, test: {tst_name}"
472         # Generate the chart:
474             layout = deepcopy(graph[u"layout"])
475         except KeyError as err:
476             logging.error(u"Finished with error: No layout defined")
477             logging.error(repr(err))
# Build per-group visibility vectors: group i shows only its own traces.
481             for i in range(len(visibility)):
483                 for vis_idx, _ in enumerate(visibility):
484                     for _ in range(len(visibility[vis_idx])):
485                         visible.append(i == vis_idx)
492                     args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
# One updatemenu button per group, labelled from "group-names" when present.
494                 for i in range(len(groups)):
496                         label = graph[u"group-names"][i]
497                     except (IndexError, KeyError):
498                         label = f"Group {i + 1}"
502                         args=[{u"visible": show[i]}, ]
505                 layout[u"updatemenus"] = list([
# Compose the output path and write the plot via plotly offline; an empty
# plot is tolerated (warning only).
519                 f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
520                 f"{spec.cpta[u'output-file-type']}")
522         logging.info(f"    Writing the file {name_file} ...")
523         plpl = plgo.Figure(data=traces, layout=layout)
525             ploff.plot(plpl, show_link=False, auto_open=False,
527         except plerr.PlotlyEmptyDataError:
528             logging.warning(u"No data for the plot. Skipped.")
530         return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
# Collect usable build numbers per job, skipping failed/removed builds.
533     for job in spec.input[u"builds"].keys():
534         if builds_dict.get(job, None) is None:
535             builds_dict[job] = list()
536         for build in spec.input[u"builds"][job]:
537             status = build[u"status"]
538             if status not in (u"failed", u"not found", u"removed", None):
539                 builds_dict[job].append(str(build[u"build"]))
541     # Create "build ID": "date" dict:
543     tb_tbl = spec.environment.get(u"testbeds", None)
544     for job_name, job_data in builds_dict.items():
545         if build_info.get(job_name, None) is None:
546             build_info[job_name] = OrderedDict()
547         for build in job_data:
# Map the build's testbed IP to a human-readable testbed name when known.
549             tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
551                 testbed = tb_tbl.get(tb_ip, u"")
552             build_info[job_name][build] = (
553                 input_data.metadata(job_name, build).get(u"generated", u""),
554                 input_data.metadata(job_name, build).get(u"version", u""),
558     anomaly_classifications = dict()
560     # Create the table header:
# Three header rows per job: build numbers, build dates, versions.
562     for job_name in builds_dict:
563         if csv_tables.get(job_name, None) is None:
564             csv_tables[job_name] = list()
565         header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
566         csv_tables[job_name].append(header)
567         build_dates = [x[0] for x in build_info[job_name].values()]
568         header = f"Build Date:,{u','.join(build_dates)}\n"
569         csv_tables[job_name].append(header)
570         versions = [x[1] for x in build_info[job_name].values()]
571         header = f"Version:,{u','.join(versions)}\n"
572         csv_tables[job_name].append(header)
# Generate each chart and accumulate its csv rows and classifications.
574     for chart in spec.cpta[u"plots"]:
575         result = _generate_chart(chart)
579         csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
581         if anomaly_classifications.get(result[u"job_name"], None) is None:
582             anomaly_classifications[result[u"job_name"]] = dict()
583         anomaly_classifications[result[u"job_name"]].update(result[u"results"])
# Write the per-job trending tables as csv, then re-read the csv to render
# a human-readable txt version via PrettyTable (values shown in Mpps).
586     for job_name, csv_table in csv_tables.items():
587         file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
588         with open(f"{file_name}.csv", u"wt") as file_handler:
589             file_handler.writelines(csv_table)
592         with open(f"{file_name}.csv", u"rt") as csv_file:
593             csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
595             for row in csv_content:
# The first row becomes the PrettyTable header.
596                 if txt_table is None:
597                     txt_table = prettytable.PrettyTable(row)
# Numeric cells are converted from pps to Mpps, rounded to 2 decimals.
600                     for idx, item in enumerate(row):
602                             row[idx] = str(round(float(item) / 1000000, 2))
606                         txt_table.add_row(row)
607                     # PrettyTable raises Exception
608                     except Exception as err:
610                             f"Error occurred while generating TXT table:\n{err}"
613             txt_table.align[u"Build Number:"] = u"l"
614         with open(f"{file_name}.txt", u"wt") as txt_file:
615             txt_file.write(str(txt_table))
# Emit one regressions file and one progressions file per job, listing the
# test names whose latest classification matched.
618     if anomaly_classifications:
620         for job_name, job_data in anomaly_classifications.items():
622                 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
623             with open(file_name, u'w') as txt_file:
624                 for test_name, classification in job_data.items():
625                     if classification == u"regression":
626                         txt_file.write(test_name + u'\n')
627                     if classification in (u"regression", u"outlier"):
630                 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
631             with open(file_name, u'w') as txt_file:
632                 for test_name, classification in job_data.items():
633                     if classification == u"progression":
634                         txt_file.write(test_name + u'\n')
638     logging.info(f"Partial results: {anomaly_classifications}")
639     logging.info(f"Result: {result}")